From d30f796d8c912ce168f4e96d482c56cabd3c907e Mon Sep 17 00:00:00 2001
From: xaxtix
Date: Fri, 11 Mar 2022 19:49:54 +0300
Subject: [PATCH] Update to 8.6.0

---
 TMessagesProj/build.gradle | 4 +- .../jni/ffmpeg/include/libavcodec/get_bits.h | 2 +- TMessagesProj/jni/intro/IntroRenderer.c | 63 +- .../jni/third_party/usrsctplib/netinet/sctp.h | 22 +- .../usrsctplib/netinet/sctp_asconf.c | 283 +- .../usrsctplib/netinet/sctp_asconf.h | 12 +- .../usrsctplib/netinet/sctp_auth.c | 35 +- .../usrsctplib/netinet/sctp_auth.h | 7 +- .../usrsctplib/netinet/sctp_bsd_addr.c | 92 +- .../usrsctplib/netinet/sctp_bsd_addr.h | 9 +- .../usrsctplib/netinet/sctp_callout.c | 36 +- .../usrsctplib/netinet/sctp_callout.h | 18 +- .../usrsctplib/netinet/sctp_cc_functions.c | 209 +- .../usrsctplib/netinet/sctp_constants.h | 86 +- .../usrsctplib/netinet/sctp_crc32.c | 27 +- .../usrsctplib/netinet/sctp_crc32.h | 8 +- .../usrsctplib/netinet/sctp_header.h | 76 +- .../usrsctplib/netinet/sctp_indata.c | 509 +-- .../usrsctplib/netinet/sctp_indata.h | 30 +- .../usrsctplib/netinet/sctp_input.c | 1567 ++++--- .../usrsctplib/netinet/sctp_input.h | 12 +- .../usrsctplib/netinet/sctp_lock_userspace.h | 23 +- .../third_party/usrsctplib/netinet/sctp_os.h | 17 +- .../usrsctplib/netinet/sctp_os_userspace.h | 165 +- .../usrsctplib/netinet/sctp_output.c | 1153 +++-- .../usrsctplib/netinet/sctp_output.h | 113 +- .../third_party/usrsctplib/netinet/sctp_pcb.c | 1360 +++--- .../third_party/usrsctplib/netinet/sctp_pcb.h | 189 +- .../usrsctplib/netinet/sctp_peeloff.c | 44 +- .../usrsctplib/netinet/sctp_peeloff.h | 6 +- .../usrsctplib/netinet/sctp_process_lock.h | 47 +- .../usrsctplib/netinet/sctp_sha1.c | 22 +- .../usrsctplib/netinet/sctp_sha1.h | 17 +- .../usrsctplib/netinet/sctp_ss_functions.c | 35 +- .../usrsctplib/netinet/sctp_structs.h | 104 +- .../usrsctplib/netinet/sctp_sysctl.c | 203 +- .../usrsctplib/netinet/sctp_sysctl.h | 33 +- .../usrsctplib/netinet/sctp_timer.c | 210 +- .../usrsctplib/netinet/sctp_timer.h | 22 +- .../third_party/usrsctplib/netinet/sctp_uio.h | 160 +- .../usrsctplib/netinet/sctp_userspace.c | 164 +- .../usrsctplib/netinet/sctp_usrreq.c | 1185 +++-- .../third_party/usrsctplib/netinet/sctp_var.h | 106 +- .../third_party/usrsctplib/netinet/sctputil.c | 2221 +++++---- .../third_party/usrsctplib/netinet/sctputil.h | 88 +- .../usrsctplib/netinet6/sctp6_usrreq.c | 410 +- .../usrsctplib/netinet6/sctp6_var.h | 22 +- .../jni/third_party/usrsctplib/user_atomic.h | 8 +- .../third_party/usrsctplib/user_environment.c | 343 +- .../third_party/usrsctplib/user_environment.h | 20 +- .../jni/third_party/usrsctplib/user_inpcb.h | 18 +- .../jni/third_party/usrsctplib/user_ip6_var.h | 22 +- .../jni/third_party/usrsctplib/user_ip_icmp.h | 24 +- .../jni/third_party/usrsctplib/user_malloc.h | 67 +- .../jni/third_party/usrsctplib/user_mbuf.c | 54 +- .../jni/third_party/usrsctplib/user_mbuf.h | 61 +- .../jni/third_party/usrsctplib/user_queue.h | 6 +- .../third_party/usrsctplib/user_recv_thread.c | 242 +- .../jni/third_party/usrsctplib/user_route.h | 2 +- .../jni/third_party/usrsctplib/user_socket.c | 417 +- .../third_party/usrsctplib/user_socketvar.h | 361 +- .../jni/third_party/usrsctplib/user_uma.h | 4 +- .../jni/third_party/usrsctplib/usrsctp.h | 36 +- TMessagesProj/jni/voip/CMakeLists.txt | 77 +- .../jni/voip/libtgvoip/EchoCanceller.cpp | 19 +- .../org_telegram_messenger_voip_Instance.cpp | 42 +- .../jni/voip/tgcalls/CodecSelectHelper.cpp | 6 +- .../jni/voip/tgcalls/EncryptedConnection.cpp | 2 +- .../jni/voip/tgcalls/FakeVideoTrackSource.cpp
| 4 +- TMessagesProj/jni/voip/tgcalls/Manager.cpp | 24 +- TMessagesProj/jni/voip/tgcalls/Manager.h | 6 +- .../jni/voip/tgcalls/MediaManager.cpp | 55 +- TMessagesProj/jni/voip/tgcalls/MediaManager.h | 2 +- .../jni/voip/tgcalls/NetworkManager.cpp | 4 +- .../SctpDataChannelProviderInterfaceImpl.cpp | 11 +- .../SctpDataChannelProviderInterfaceImpl.h | 9 +- .../jni/voip/tgcalls/StaticThreads.cpp | 15 +- .../jni/voip/tgcalls/ThreadLocalObject.h | 6 +- .../voip/tgcalls/group/AVIOContextImpl.cpp | 64 + .../jni/voip/tgcalls/group/AVIOContextImpl.h | 40 + .../voip/tgcalls/group/AudioStreamingPart.cpp | 371 +- .../voip/tgcalls/group/AudioStreamingPart.h | 14 +- .../group/AudioStreamingPartInternal.cpp | 367 ++ .../group/AudioStreamingPartInternal.h | 65 + .../AudioStreamingPartPersistentDecoder.cpp | 122 + .../AudioStreamingPartPersistentDecoder.h | 51 + .../tgcalls/group/GroupInstanceCustomImpl.cpp | 330 +- .../tgcalls/group/GroupInstanceCustomImpl.h | 3 +- .../voip/tgcalls/group/GroupInstanceImpl.h | 4 +- .../tgcalls/group/GroupNetworkManager.cpp | 12 +- .../voip/tgcalls/group/GroupNetworkManager.h | 1 + .../tgcalls/group/StreamingMediaContext.cpp | 286 +- .../tgcalls/group/StreamingMediaContext.h | 1 + .../voip/tgcalls/group/VideoStreamingPart.cpp | 165 +- .../voip/tgcalls/group/VideoStreamingPart.h | 14 +- .../voip/tgcalls/platform/PlatformInterface.h | 6 +- .../platform/android/AndroidInterface.cpp | 6 +- .../platform/android/AndroidInterface.h | 2 +- .../tgcalls/platform/fake/FakeInterface.cpp | 4 +- .../tgcalls/platform/fake/FakeInterface.h | 2 +- .../reference/InstanceImplReference.cpp | 1035 ----- .../tgcalls/reference/InstanceImplReference.h | 53 - .../jni/voip/tgcalls/v2/InstanceV2Impl.cpp | 500 ++- .../jni/voip/tgcalls/v2/InstanceV2Impl.h | 2 +- .../voip/tgcalls/v2/NativeNetworkingImpl.cpp | 2 +- .../voip/tgcalls/v2/NativeNetworkingImpl.h | 3 +- .../jni/voip/tgcalls/v2/Signaling.cpp | 53 + TMessagesProj/jni/voip/tgcalls/v2/Signaling.h | 2 + .../voip/webrtc/absl/algorithm/container.h | 281 +- .../jni/voip/webrtc/absl/base/attributes.h | 248 +- .../jni/voip/webrtc/absl/base/call_once.h | 13 +- .../jni/voip/webrtc/absl/base/casts.h | 124 +- .../jni/voip/webrtc/absl/base/config.h | 263 +- .../webrtc/absl/base/dynamic_annotations.cc | 129 - .../webrtc/absl/base/dynamic_annotations.h | 793 ++-- .../jni/voip/webrtc/absl/base/internal/bits.h | 218 - .../webrtc/absl/base/internal/direct_mmap.h | 10 +- .../absl/base/internal/dynamic_annotations.h | 398 ++ .../voip/webrtc/absl/base/internal/endian.h | 61 + .../base/internal/exception_safety_testing.h | 27 +- .../absl/base/internal/exponential_biased.cc | 2 +- .../absl/base/internal/exponential_biased.h | 2 +- .../voip/webrtc/absl/base/internal/invoke.h | 8 +- .../absl/base/internal/low_level_scheduling.h | 35 +- .../webrtc/absl/base/internal/raw_logging.cc | 70 +- .../webrtc/absl/base/internal/raw_logging.h | 22 +- .../webrtc/absl/base/internal/spinlock.cc | 57 +- .../voip/webrtc/absl/base/internal/spinlock.h | 62 +- .../absl/base/internal/spinlock_akaros.inc | 4 +- .../absl/base/internal/spinlock_linux.inc | 14 +- .../absl/base/internal/spinlock_posix.inc | 4 +- .../webrtc/absl/base/internal/spinlock_wait.h | 24 +- .../absl/base/internal/spinlock_win32.inc | 10 +- .../webrtc/absl/base/internal/strerror.cc | 39 +- .../absl/base/internal/strerror_benchmark.cc | 9 - .../voip/webrtc/absl/base/internal/sysinfo.cc | 95 +- .../voip/webrtc/absl/base/internal/sysinfo.h | 8 + .../absl/base/internal/thread_annotations.h | 271 ++ 
.../absl/base/internal/thread_identity.cc | 15 +- .../absl/base/internal/thread_identity.h | 110 +- .../absl/base/internal/throw_delegate.cc | 118 +- .../absl/base/internal/tsan_mutex_interface.h | 4 +- .../absl/base/internal/unaligned_access.h | 76 - .../absl/base/internal/unscaledcycleclock.cc | 20 +- .../absl/base/internal/unscaledcycleclock.h | 14 +- .../jni/voip/webrtc/absl/base/log_severity.cc | 26 + .../jni/voip/webrtc/absl/base/log_severity.h | 59 +- .../jni/voip/webrtc/absl/base/macros.h | 120 +- .../jni/voip/webrtc/absl/base/optimization.h | 75 +- .../jni/voip/webrtc/absl/base/options.h | 10 +- .../jni/voip/webrtc/absl/base/policy_checks.h | 2 +- .../jni/voip/webrtc/absl/base/port.h | 1 - .../webrtc/absl/base/spinlock_test_common.cc | 21 +- .../webrtc/absl/base/thread_annotations.h | 143 +- .../jni/voip/webrtc/absl/cleanup/cleanup.h | 140 + .../webrtc/absl/cleanup/internal/cleanup.h | 100 + .../webrtc/absl/container/btree_benchmark.cc | 100 +- .../voip/webrtc/absl/container/btree_map.h | 187 +- .../voip/webrtc/absl/container/btree_set.h | 179 +- .../voip/webrtc/absl/container/btree_test.cc | 3071 +++++++++++++ .../voip/webrtc/absl/container/btree_test.h | 166 + .../voip/webrtc/absl/container/fixed_array.h | 48 +- .../webrtc/absl/container/flat_hash_map.h | 15 +- .../webrtc/absl/container/flat_hash_set.h | 25 +- .../webrtc/absl/container/inlined_vector.h | 229 +- .../container/inlined_vector_benchmark.cc | 22 + .../webrtc/absl/container/internal/btree.h | 1765 ++++---- .../absl/container/internal/btree_container.h | 320 +- .../webrtc/absl/container/internal/common.h | 7 +- .../container/internal/compressed_tuple.h | 2 +- .../internal/compressed_tuple_test.cc | 419 ++ .../container/internal/container_memory.h | 65 +- .../internal/container_memory_test.cc | 257 ++ .../container/internal/counting_allocator.h | 69 +- .../internal/hash_function_defaults.h | 32 +- .../internal/hash_function_defaults_test.cc | 383 ++ .../internal/hash_generator_testing.cc | 6 +- .../internal/hash_generator_testing.h | 21 + .../container/internal/hash_policy_traits.h | 31 +- .../container/internal/hashtablez_sampler.cc | 190 +- .../container/internal/hashtablez_sampler.h | 190 +- ...ashtablez_sampler_force_weak_definition.cc | 3 +- .../internal/hashtablez_sampler_test.cc | 428 ++ .../absl/container/internal/inlined_vector.h | 922 ++-- .../webrtc/absl/container/internal/layout.h | 20 +- .../container/internal/layout_benchmark.cc | 122 + .../absl/container/internal/layout_test.cc | 1641 +++++++ .../internal/node_hash_policy_test.cc | 69 + .../absl/container/internal/raw_hash_map.h | 5 +- .../absl/container/internal/raw_hash_set.cc | 23 +- .../absl/container/internal/raw_hash_set.h | 723 +-- .../internal/raw_hash_set_allocator_test.cc | 505 +++ .../internal/raw_hash_set_benchmark.cc | 443 ++ .../internal/raw_hash_set_probe_benchmark.cc | 590 +++ .../container/internal/raw_hash_set_test.cc | 2182 +++++++++ .../internal/test_instance_tracker_test.cc | 184 + .../internal/unordered_map_constructor_test.h | 494 ++ .../internal/unordered_map_lookup_test.h | 117 + .../internal/unordered_map_members_test.h | 87 + .../internal/unordered_map_modifiers_test.h | 352 ++ .../container/internal/unordered_map_test.cc | 50 + .../internal/unordered_set_constructor_test.h | 496 ++ .../internal/unordered_set_lookup_test.h | 91 + .../internal/unordered_set_members_test.h | 86 + .../internal/unordered_set_modifiers_test.h | 221 + .../container/internal/unordered_set_test.cc | 41 + .../webrtc/absl/container/node_hash_map.h | 25 +- 
.../webrtc/absl/container/node_hash_set.h | 50 +- .../absl/debugging/failure_signal_handler.cc | 44 +- .../absl/debugging/failure_signal_handler.h | 2 +- .../debugging/internal/address_is_readable.cc | 134 +- .../absl/debugging/internal/demangle.cc | 120 +- .../absl/debugging/internal/demangle_test.cc | 219 + .../absl/debugging/internal/elf_mem_image.cc | 23 +- .../absl/debugging/internal/elf_mem_image.h | 8 +- .../absl/debugging/internal/examine_stack.cc | 48 +- .../debugging/internal/stack_consumption.cc | 3 +- .../debugging/internal/stack_consumption.h | 5 +- .../internal/stack_consumption_test.cc | 50 + .../internal/stacktrace_aarch64-inl.inc | 18 +- .../debugging/internal/stacktrace_arm-inl.inc | 28 +- .../debugging/internal/stacktrace_config.h | 66 +- .../internal/stacktrace_emscripten-inl.inc | 110 + .../internal/stacktrace_generic-inl.inc | 28 +- .../internal/stacktrace_powerpc-inl.inc | 18 +- .../internal/stacktrace_riscv-inl.inc | 239 + .../debugging/internal/stacktrace_x86-inl.inc | 43 +- .../absl/debugging/internal/symbolize.h | 41 +- .../absl/debugging/internal/vdso_support.cc | 47 +- .../voip/webrtc/absl/debugging/leak_check.cc | 16 + .../voip/webrtc/absl/debugging/leak_check.h | 22 +- .../voip/webrtc/absl/debugging/stacktrace.cc | 2 + .../voip/webrtc/absl/debugging/symbolize.cc | 20 +- .../absl/debugging/symbolize_darwin.inc | 101 + .../webrtc/absl/debugging/symbolize_elf.inc | 173 +- .../absl/debugging/symbolize_emscripten.inc | 72 + .../voip/webrtc/absl/flags/commandlineflag.cc | 34 + .../voip/webrtc/absl/flags/commandlineflag.h | 200 + .../jni/voip/webrtc/absl/flags/config.h | 35 +- .../jni/voip/webrtc/absl/flags/declare.h | 7 +- .../jni/voip/webrtc/absl/flags/flag.cc | 2 - .../jni/voip/webrtc/absl/flags/flag.h | 240 +- .../voip/webrtc/absl/flags/flag_benchmark.cc | 153 +- .../voip/webrtc/absl/flags/flag_benchmark.lds | 13 + .../jni/voip/webrtc/absl/flags/flag_test.cc | 629 ++- .../voip/webrtc/absl/flags/flag_test_defs.cc | 4 +- .../absl/flags/internal/commandlineflag.cc | 4 - .../absl/flags/internal/commandlineflag.h | 139 +- .../voip/webrtc/absl/flags/internal/flag.cc | 281 +- .../voip/webrtc/absl/flags/internal/flag.h | 511 ++- .../voip/webrtc/absl/flags/internal/parse.h | 8 + .../webrtc/absl/flags/internal/path_util.h | 1 - .../absl/flags/internal/path_util_test.cc | 46 + .../flags/internal/private_handle_accessor.cc | 65 + .../flags/internal/private_handle_accessor.h | 61 + .../absl/flags/internal/program_name_test.cc | 61 + .../webrtc/absl/flags/internal/registry.cc | 351 -- .../webrtc/absl/flags/internal/registry.h | 55 +- .../absl/flags/internal/sequence_lock.h | 187 + .../absl/flags/internal/sequence_lock_test.cc | 169 + .../webrtc/absl/flags/internal/type_erased.cc | 90 - .../webrtc/absl/flags/internal/type_erased.h | 90 - .../voip/webrtc/absl/flags/internal/usage.cc | 353 +- .../voip/webrtc/absl/flags/internal/usage.h | 47 +- .../webrtc/absl/flags/internal/usage_test.cc | 494 ++ .../jni/voip/webrtc/absl/flags/marshalling.cc | 21 +- .../jni/voip/webrtc/absl/flags/marshalling.h | 4 +- .../jni/voip/webrtc/absl/flags/parse.cc | 111 +- .../jni/voip/webrtc/absl/flags/parse.h | 1 - .../jni/voip/webrtc/absl/flags/reflection.cc | 354 ++ .../jni/voip/webrtc/absl/flags/reflection.h | 90 + .../voip/webrtc/absl/flags/usage_config.cc | 6 +- .../jni/voip/webrtc/absl/flags/usage_config.h | 3 +- .../webrtc/absl/functional/function_ref.h | 10 +- .../absl/functional/internal/front_binder.h | 16 +- .../absl/functional/internal/function_ref.h | 4 +- 
.../jni/voip/webrtc/absl/hash/hash.h | 117 +- .../jni/voip/webrtc/absl/hash/hash_test.cc | 342 +- .../voip/webrtc/absl/hash/internal/city.cc | 27 +- .../jni/voip/webrtc/absl/hash/internal/city.h | 20 +- .../voip/webrtc/absl/hash/internal/hash.cc | 40 +- .../jni/voip/webrtc/absl/hash/internal/hash.h | 525 ++- .../absl/hash/internal/low_level_hash.cc | 123 + .../absl/hash/internal/low_level_hash.h | 50 + .../absl/hash/internal/spy_hash_state.h | 35 + .../jni/voip/webrtc/absl/memory/memory.h | 4 + .../jni/voip/webrtc/absl/meta/type_traits.h | 60 +- .../jni/voip/webrtc/absl/numeric/bits.h | 178 + .../jni/voip/webrtc/absl/numeric/int128.cc | 53 +- .../jni/voip/webrtc/absl/numeric/int128.h | 219 +- .../webrtc/absl/numeric/int128_benchmark.cc | 161 +- .../absl/numeric/int128_have_intrinsic.inc | 44 +- .../absl/numeric/int128_no_intrinsic.inc | 139 +- .../webrtc/absl/numeric/int128_stream_test.cc | 1395 ++++++ .../voip/webrtc/absl/numeric/int128_test.cc | 1261 ++++++ .../voip/webrtc/absl/numeric/internal/bits.h | 358 ++ .../absl/numeric/internal/representation.h | 55 + .../profiling/internal/exponential_biased.cc | 93 + .../profiling/internal/exponential_biased.h | 130 + .../profiling/internal/periodic_sampler.cc | 53 + .../profiling/internal/periodic_sampler.h | 211 + .../absl/profiling/internal/sample_recorder.h | 245 + .../internal/sample_recorder_test.cc | 184 + .../absl/random/bernoulli_distribution.h | 8 +- .../jni/voip/webrtc/absl/random/bit_gen_ref.h | 111 +- .../voip/webrtc/absl/random/distributions.h | 18 +- .../random/internal/distribution_caller.h | 55 +- .../absl/random/internal/distributions.h | 52 - .../absl/random/internal/explicit_seed_seq.h | 1 + .../absl/random/internal/fast_uniform_bits.h | 229 +- .../webrtc/absl/random/internal/fastmath.h | 23 +- .../gaussian_distribution_gentables.cc | 16 +- .../absl/random/internal/generate_real.h | 10 +- .../random/internal/iostream_state_saver.h | 4 +- .../absl/random/internal/mock_overload_set.h | 39 +- .../random/internal/mocking_bit_gen_base.h | 85 - .../webrtc/absl/random/internal/pcg_engine.h | 7 +- .../webrtc/absl/random/internal/pool_urbg.cc | 7 +- .../absl/random/internal/randen-keys.inc | 207 - .../webrtc/absl/random/internal/randen.cc | 2 +- .../voip/webrtc/absl/random/internal/randen.h | 18 +- .../absl/random/internal/randen_detect.cc | 5 +- .../absl/random/internal/randen_engine.h | 82 +- .../absl/random/internal/randen_hwaes.cc | 420 +- .../absl/random/internal/randen_hwaes.h | 2 +- .../absl/random/internal/randen_round_keys.cc | 462 ++ .../absl/random/internal/randen_slow.cc | 667 ++- .../webrtc/absl/random/internal/randen_slow.h | 9 +- .../absl/random/internal/randen_traits.h | 33 +- .../absl/random/internal/seed_material.cc | 50 +- .../voip/webrtc/absl/random/internal/traits.h | 58 +- .../absl/random/internal/uniform_helper.h | 76 +- .../absl/random/internal/wide_multiply.h | 85 +- .../random/log_uniform_int_distribution.h | 16 +- .../webrtc/absl/random/mock_distributions.h | 5 + .../voip/webrtc/absl/random/mocking_bit_gen.h | 208 +- .../webrtc/absl/random/poisson_distribution.h | 11 +- .../jni/voip/webrtc/absl/random/random.h | 2 +- .../absl/random/uniform_int_distribution.h | 6 +- .../webrtc/absl/random/zipf_distribution.h | 7 +- .../absl/status/internal/status_internal.h | 77 + .../absl/status/internal/statusor_internal.h | 396 ++ .../jni/voip/webrtc/absl/status/status.cc | 68 +- .../jni/voip/webrtc/absl/status/status.h | 693 ++- .../absl/status/status_payload_printer.cc | 15 +- 
.../jni/voip/webrtc/absl/status/statusor.cc | 103 + .../jni/voip/webrtc/absl/status/statusor.h | 776 ++++ .../jni/voip/webrtc/absl/strings/ascii.h | 8 +- .../voip/webrtc/absl/strings/ascii_test.cc | 365 ++ .../jni/voip/webrtc/absl/strings/charconv.cc | 16 +- .../jni/voip/webrtc/absl/strings/charconv.h | 5 +- .../voip/webrtc/absl/strings/charconv_test.cc | 782 ++++ .../jni/voip/webrtc/absl/strings/cord.cc | 1784 +++----- .../jni/voip/webrtc/absl/strings/cord.h | 920 ++-- .../voip/webrtc/absl/strings/cord_analysis.cc | 188 + .../voip/webrtc/absl/strings/cord_analysis.h | 44 + .../absl/strings/cord_ring_reader_test.cc | 180 + .../webrtc/absl/strings/cord_ring_test.cc | 1454 ++++++ .../jni/voip/webrtc/absl/strings/cord_test.cc | 2588 +++++++++++ .../webrtc/absl/strings/cord_test_helpers.h | 62 + .../voip/webrtc/absl/strings/cordz_test.cc | 466 ++ .../webrtc/absl/strings/cordz_test_helpers.h | 151 + .../jni/voip/webrtc/absl/strings/escaping.cc | 8 +- .../voip/webrtc/absl/strings/escaping_test.cc | 664 +++ .../absl/strings/internal/charconv_parse.cc | 14 +- .../absl/strings/internal/cord_data_edge.h | 63 + .../absl/strings/internal/cord_internal.cc | 70 + .../absl/strings/internal/cord_internal.h | 545 ++- .../absl/strings/internal/cord_rep_btree.cc | 1226 +++++ .../absl/strings/internal/cord_rep_btree.h | 924 ++++ .../internal/cord_rep_btree_navigator.cc | 187 + .../internal/cord_rep_btree_navigator.h | 265 ++ .../strings/internal/cord_rep_btree_reader.cc | 69 + .../strings/internal/cord_rep_btree_reader.h | 212 + .../absl/strings/internal/cord_rep_consume.cc | 62 + .../absl/strings/internal/cord_rep_consume.h | 50 + .../absl/strings/internal/cord_rep_crc.cc | 54 + .../absl/strings/internal/cord_rep_crc.h | 102 + .../absl/strings/internal/cord_rep_flat.h | 187 + .../absl/strings/internal/cord_rep_ring.cc | 771 ++++ .../absl/strings/internal/cord_rep_ring.h | 607 +++ .../strings/internal/cord_rep_ring_reader.h | 118 + .../strings/internal/cord_rep_test_util.h | 205 + .../absl/strings/internal/cordz_functions.cc | 96 + .../absl/strings/internal/cordz_functions.h | 85 + .../strings/internal/cordz_functions_test.cc | 149 + .../absl/strings/internal/cordz_handle.cc | 139 + .../absl/strings/internal/cordz_handle.h | 131 + .../strings/internal/cordz_handle_test.cc | 265 ++ .../absl/strings/internal/cordz_info.cc | 416 ++ .../webrtc/absl/strings/internal/cordz_info.h | 298 ++ .../internal/cordz_info_statistics_test.cc | 555 +++ .../absl/strings/internal/cordz_info_test.cc | 341 ++ .../strings/internal/cordz_sample_token.cc | 64 + .../strings/internal/cordz_sample_token.h | 97 + .../internal/cordz_sample_token_test.cc | 208 + .../absl/strings/internal/cordz_statistics.h | 88 + .../strings/internal/cordz_update_scope.h | 71 + .../internal/cordz_update_scope_test.cc | 49 + .../strings/internal/cordz_update_tracker.h | 123 + .../internal/cordz_update_tracker_test.cc | 147 + .../webrtc/absl/strings/internal/escaping.cc | 9 +- .../absl/strings/internal/memutil_test.cc | 179 + .../strings/internal/numbers_test_common.h | 184 + .../absl/strings/internal/ostringstream.cc | 2 +- .../strings/internal/ostringstream_test.cc | 102 + .../strings/internal/pow10_helper_test.cc | 122 + .../strings/internal/resize_uninitialized.h | 50 +- .../internal/resize_uninitialized_test.cc | 133 + .../absl/strings/internal/str_format/arg.cc | 374 +- .../absl/strings/internal/str_format/arg.h | 220 +- .../strings/internal/str_format/arg_test.cc | 130 + .../absl/strings/internal/str_format/bind.cc | 23 +- 
.../absl/strings/internal/str_format/bind.h | 48 +- .../strings/internal/str_format/bind_test.cc | 157 + .../strings/internal/str_format/checker.h | 44 +- .../internal/str_format/checker_test.cc | 170 + .../internal/str_format/convert_test.cc | 1243 ++++++ .../strings/internal/str_format/extension.cc | 50 +- .../strings/internal/str_format/extension.h | 312 +- .../internal/str_format/extension_test.cc | 98 + .../internal/str_format/float_conversion.cc | 1079 ++++- .../internal/str_format/float_conversion.h | 20 +- .../absl/strings/internal/str_format/output.h | 19 +- .../internal/str_format/output_test.cc | 79 + .../strings/internal/str_format/parser.cc | 159 +- .../absl/strings/internal/str_format/parser.h | 80 +- .../internal/str_format/parser_test.cc | 434 ++ .../absl/strings/internal/str_join_internal.h | 15 +- .../strings/internal/str_split_internal.h | 121 +- .../absl/strings/internal/string_constant.h | 70 + .../strings/internal/string_constant_test.cc | 60 + .../voip/webrtc/absl/strings/internal/utf8.cc | 18 +- .../webrtc/absl/strings/internal/utf8_test.cc | 66 + .../jni/voip/webrtc/absl/strings/match.cc | 9 +- .../jni/voip/webrtc/absl/strings/match.h | 22 +- .../voip/webrtc/absl/strings/match_test.cc | 127 + .../jni/voip/webrtc/absl/strings/numbers.cc | 138 +- .../jni/voip/webrtc/absl/strings/numbers.h | 84 +- .../voip/webrtc/absl/strings/numbers_test.cc | 1521 +++++++ .../jni/voip/webrtc/absl/strings/str_cat.cc | 16 +- .../jni/voip/webrtc/absl/strings/str_cat.h | 30 +- .../webrtc/absl/strings/str_cat_benchmark.cc | 47 + .../voip/webrtc/absl/strings/str_cat_test.cc | 610 +++ .../jni/voip/webrtc/absl/strings/str_format.h | 303 +- .../webrtc/absl/strings/str_format_test.cc | 774 ++++ .../jni/voip/webrtc/absl/strings/str_join.h | 2 +- .../voip/webrtc/absl/strings/str_join_test.cc | 608 +++ .../webrtc/absl/strings/str_replace_test.cc | 341 ++ .../jni/voip/webrtc/absl/strings/str_split.h | 46 +- .../webrtc/absl/strings/str_split_test.cc | 981 ++++ .../voip/webrtc/absl/strings/string_view.cc | 37 +- .../voip/webrtc/absl/strings/string_view.h | 165 +- .../webrtc/absl/strings/string_view_test.cc | 1308 ++++++ .../voip/webrtc/absl/strings/strip_test.cc | 198 + .../voip/webrtc/absl/strings/substitute.cc | 3 +- .../jni/voip/webrtc/absl/strings/substitute.h | 200 +- .../webrtc/absl/strings/substitute_test.cc | 252 ++ .../absl/synchronization/barrier_test.cc | 75 + .../absl/synchronization/blocking_counter.cc | 40 +- .../absl/synchronization/blocking_counter.h | 8 +- .../blocking_counter_benchmark.cc | 83 + .../synchronization/blocking_counter_test.cc | 80 + .../internal/create_thread_identity.cc | 6 +- .../absl/synchronization/internal/futex.h | 154 + .../synchronization/internal/graphcycles.cc | 7 +- .../internal/graphcycles_test.cc | 464 ++ .../synchronization/internal/kernel_timeout.h | 57 +- .../synchronization/internal/mutex_nonprod.cc | 320 -- .../internal/mutex_nonprod.inc | 261 -- .../internal/per_thread_sem.cc | 4 +- .../synchronization/internal/per_thread_sem.h | 10 +- .../internal/per_thread_sem_test.cc | 181 + .../absl/synchronization/internal/waiter.cc | 60 +- .../absl/synchronization/internal/waiter.h | 12 +- .../absl/synchronization/lifetime_test.cc | 181 + .../voip/webrtc/absl/synchronization/mutex.cc | 326 +- .../voip/webrtc/absl/synchronization/mutex.h | 159 +- .../absl/synchronization/mutex_benchmark.cc | 229 +- .../webrtc/absl/synchronization/mutex_test.cc | 1707 +++++++ .../absl/synchronization/notification.h | 5 +- .../absl/synchronization/notification_test.cc | 133 + 
.../jni/voip/webrtc/absl/time/civil_time.cc | 16 +- .../voip/webrtc/absl/time/civil_time_test.cc | 1243 ++++++ .../jni/voip/webrtc/absl/time/clock.cc | 276 +- .../jni/voip/webrtc/absl/time/clock.h | 4 +- .../jni/voip/webrtc/absl/time/clock_test.cc | 122 + .../jni/voip/webrtc/absl/time/duration.cc | 152 +- .../webrtc/absl/time/duration_benchmark.cc | 16 + .../voip/webrtc/absl/time/duration_test.cc | 1821 ++++++++ .../jni/voip/webrtc/absl/time/format.cc | 77 +- .../voip/webrtc/absl/time/format_benchmark.cc | 2 +- .../jni/voip/webrtc/absl/time/format_test.cc | 441 ++ .../cctz/include/cctz/civil_time_detail.h | 74 +- .../internal/cctz/include/cctz/time_zone.h | 119 +- .../time/internal/cctz/src/cctz_benchmark.cc | 18 +- .../time/internal/cctz/src/civil_time_test.cc | 1066 +++++ .../time/internal/cctz/src/time_zone_fixed.cc | 2 +- .../internal/cctz/src/time_zone_format.cc | 161 +- .../cctz/src/time_zone_format_test.cc | 1676 +++++++ .../time/internal/cctz/src/time_zone_if.h | 3 +- .../time/internal/cctz/src/time_zone_impl.cc | 34 +- .../time/internal/cctz/src/time_zone_info.cc | 363 +- .../time/internal/cctz/src/time_zone_info.h | 9 +- .../time/internal/cctz/src/time_zone_libc.cc | 23 +- .../internal/cctz/src/time_zone_lookup.cc | 49 + .../cctz/src/time_zone_lookup_test.cc | 1459 ++++++ .../absl/time/internal/cctz/src/tzfile.h | 2 +- .../internal/cctz/src/zone_info_source.cc | 2 +- .../webrtc/absl/time/internal/test_util.cc | 1 + .../jni/voip/webrtc/absl/time/time.cc | 7 +- .../jni/voip/webrtc/absl/time/time.h | 184 +- .../jni/voip/webrtc/absl/time/time_test.cc | 1280 ++++++ .../voip/webrtc/absl/time/time_zone_test.cc | 97 + .../jni/voip/webrtc/absl/types/any.h | 54 +- .../absl/types/any_exception_safety_test.cc | 173 + .../jni/voip/webrtc/absl/types/any_test.cc | 781 ++++ .../webrtc/absl/types/bad_optional_access.h | 2 +- .../webrtc/absl/types/bad_variant_access.h | 4 +- .../jni/voip/webrtc/absl/types/compare.h | 6 +- .../voip/webrtc/absl/types/compare_test.cc | 389 ++ .../absl/types/internal/conformance_profile.h | 599 ++- .../absl/types/internal/conformance_testing.h | 1386 ++++++ .../internal/conformance_testing_helpers.h | 391 ++ .../internal/conformance_testing_test.cc | 1556 +++++++ .../webrtc/absl/types/internal/parentheses.h | 34 + .../absl/types/internal/transform_args.h | 246 + .../voip/webrtc/absl/types/internal/variant.h | 10 +- .../types/optional_exception_safety_test.cc | 292 ++ .../voip/webrtc/absl/types/optional_test.cc | 1659 +++++++ .../jni/voip/webrtc/absl/types/span.h | 44 +- .../jni/voip/webrtc/absl/types/span_test.cc | 848 ++++ .../jni/voip/webrtc/absl/types/variant.h | 13 +- .../types/variant_exception_safety_test.cc | 532 +++ .../voip/webrtc/absl/types/variant_test.cc | 2718 +++++++++++ .../jni/voip/webrtc/absl/utility/utility.h | 4 +- .../voip/webrtc/absl/utility/utility_test.cc | 376 ++ TMessagesProj/jni/voip/webrtc/api/OWNERS | 14 +- .../jni/voip/webrtc/api/adaptation/resource.h | 2 +- .../jni/voip/webrtc/api/array_view.h | 26 +- .../jni/voip/webrtc/api/async_dns_resolver.h | 17 +- .../jni/voip/webrtc/api/audio/audio_frame.cc | 26 +- .../jni/voip/webrtc/api/audio/audio_frame.h | 12 +- .../webrtc/api/audio/audio_frame_processor.h | 6 +- .../jni/voip/webrtc/api/audio/audio_mixer.h | 6 +- .../voip/webrtc/api/audio/channel_layout.cc | 2 +- .../api/audio/echo_canceller3_config.cc | 7 + .../webrtc/api/audio/echo_canceller3_config.h | 9 + .../api/audio/echo_canceller3_config_json.cc | 68 +- .../api/audio_codecs/L16/audio_decoder_L16.cc | 15 +- 
.../api/audio_codecs/L16/audio_decoder_L16.h | 3 +- .../api/audio_codecs/L16/audio_encoder_L16.cc | 13 +- .../api/audio_codecs/L16/audio_encoder_L16.h | 4 +- .../webrtc/api/audio_codecs/audio_decoder.cc | 4 +- .../webrtc/api/audio_codecs/audio_decoder.h | 40 +- .../audio_decoder_factory_template.h | 4 +- .../webrtc/api/audio_codecs/audio_encoder.cc | 3 +- .../webrtc/api/audio_codecs/audio_encoder.h | 13 +- .../audio_encoder_factory_template.h | 4 +- .../webrtc/api/audio_codecs/audio_format.h | 2 +- .../audio_codecs/g711/audio_decoder_g711.cc | 11 +- .../audio_codecs/g711/audio_decoder_g711.h | 4 +- .../audio_codecs/g711/audio_encoder_g711.cc | 11 +- .../audio_codecs/g711/audio_encoder_g711.h | 4 +- .../audio_codecs/g722/audio_decoder_g722.cc | 17 +- .../audio_codecs/g722/audio_encoder_g722.cc | 12 +- .../g722/audio_encoder_g722_config.h | 3 +- .../audio_codecs/ilbc/audio_decoder_ilbc.cc | 9 +- .../audio_codecs/ilbc/audio_encoder_ilbc.cc | 12 +- .../isac/audio_decoder_isac_fix.cc | 9 +- .../isac/audio_decoder_isac_float.cc | 9 +- .../isac/audio_encoder_isac_fix.cc | 9 +- .../isac/audio_encoder_isac_float.cc | 9 +- .../audio_decoder_multi_channel_opus_config.h | 3 +- .../audio_codecs/opus/audio_decoder_opus.cc | 10 +- .../audio_codecs/opus/audio_encoder_opus.cc | 4 + .../opus/audio_encoder_opus_config.h | 8 +- .../jni/voip/webrtc/api/audio_options.cc | 22 +- .../jni/voip/webrtc/api/audio_options.h | 15 +- .../voip/webrtc/api/call/bitrate_allocation.h | 2 +- .../jni/voip/webrtc/api/candidate.cc | 42 +- TMessagesProj/jni/voip/webrtc/api/candidate.h | 53 +- .../api/create_peerconnection_factory.cc | 4 + .../voip/webrtc/api/crypto/crypto_options.cc | 14 +- .../voip/webrtc/api/crypto/crypto_options.h | 2 +- .../voip/webrtc/api/data_channel_interface.cc | 4 + .../voip/webrtc/api/data_channel_interface.h | 27 +- .../webrtc/api/dtls_transport_interface.cc | 18 + .../webrtc/api/dtls_transport_interface.h | 16 + .../voip/webrtc/api/dtmf_sender_interface.h | 20 +- .../jni/voip/webrtc/api/fec_controller.h | 2 +- .../webrtc/api/frame_transformer_interface.h | 17 +- .../voip/webrtc/api/ice_transport_factory.h | 4 +- TMessagesProj/jni/voip/webrtc/api/jsep.h | 18 +- .../jni/voip/webrtc/api/jsep_ice_candidate.h | 9 +- .../webrtc/api/jsep_session_description.h | 8 +- .../voip/webrtc/api/media_stream_interface.h | 20 +- .../jni/voip/webrtc/api/media_types.cc | 2 +- .../jni/voip/webrtc/api/metronome/metronome.h | 65 + .../jni/voip/webrtc/api/neteq/OWNERS | 2 - .../jni/voip/webrtc/api/neteq/neteq.cc | 3 +- .../jni/voip/webrtc/api/neteq/neteq.h | 44 +- .../voip/webrtc/api/neteq/neteq_controller.h | 18 +- .../api/neteq/neteq_controller_factory.h | 2 +- .../jni/voip/webrtc/api/neteq/neteq_factory.h | 2 +- .../api/numerics/samples_stats_counter.h | 6 +- .../voip/webrtc/api/packet_socket_factory.h | 23 +- .../webrtc/api/peer_connection_interface.cc | 6 - .../webrtc/api/peer_connection_interface.h | 309 +- .../jni/voip/webrtc/api/ref_counted_base.h | 11 +- TMessagesProj/jni/voip/webrtc/api/rtc_error.h | 14 +- .../webrtc/api/rtc_event_log/rtc_event.cc | 2 +- .../voip/webrtc/api/rtc_event_log/rtc_event.h | 13 +- .../webrtc/api/rtc_event_log/rtc_event_log.h | 4 +- .../voip/webrtc/api/rtc_event_log_output.h | 2 +- .../jni/voip/webrtc/api/rtp_packet_info.h | 10 +- .../jni/voip/webrtc/api/rtp_packet_infos.h | 15 +- .../jni/voip/webrtc/api/rtp_parameters.cc | 142 +- .../jni/voip/webrtc/api/rtp_parameters.h | 52 +- .../voip/webrtc/api/rtp_receiver_interface.h | 37 +- .../voip/webrtc/api/rtp_sender_interface.h | 28 - 
.../webrtc/api/rtp_transceiver_interface.cc | 35 +- .../webrtc/api/rtp_transceiver_interface.h | 13 +- .../jni/voip/webrtc/api/scoped_refptr.h | 10 +- ...set_local_description_observer_interface.h | 2 +- ...et_remote_description_observer_interface.h | 2 +- .../jni/voip/webrtc/api/stats/OWNERS | 2 - .../jni/voip/webrtc/api/stats/rtc_stats.h | 63 +- .../voip/webrtc/api/stats/rtc_stats_report.h | 16 +- .../voip/webrtc/api/stats/rtcstats_objects.h | 50 +- .../jni/voip/webrtc/api/stats_types.cc | 29 +- .../jni/voip/webrtc/api/stats_types.h | 56 +- .../voip/webrtc/api/task_queue/queued_task.h | 6 +- .../webrtc/api/task_queue/task_queue_base.h | 75 +- .../test/audio_quality_analyzer_interface.h | 4 +- .../voip/webrtc/api/test/audioproc_float.h | 14 +- .../webrtc/api/test/compile_all_headers.cc | 2 + .../webrtc/api/test/create_frame_generator.h | 6 +- ...connection_quality_test_frame_generator.cc | 2 +- ..._connection_quality_test_frame_generator.h | 2 +- ...eate_peerconnection_quality_test_fixture.h | 6 +- .../webrtc/api/test/create_time_controller.cc | 9 +- .../webrtc/api/test/create_time_controller.h | 2 +- .../webrtc/api/test/dummy_peer_connection.h | 8 +- .../api/test/frame_generator_interface.cc | 2 +- .../webrtc/api/test/mock_async_dns_resolver.h | 6 +- .../voip/webrtc/api/test/mock_audio_sink.h | 44 + .../voip/webrtc/api/test/mock_data_channel.h | 3 +- .../api/test/mock_media_stream_interface.h | 6 +- .../mock_peer_connection_factory_interface.h | 7 +- .../api/test/mock_peerconnectioninterface.h | 33 +- .../webrtc/api/test/mock_rtp_transceiver.h | 6 +- .../voip/webrtc/api/test/mock_rtpreceiver.h | 12 +- .../jni/voip/webrtc/api/test/mock_rtpsender.h | 16 +- .../api/test/mock_transformable_video_frame.h | 4 +- .../voip/webrtc/api/test/mock_video_decoder.h | 11 +- .../api/test/mock_video_encoder_factory.h | 4 - .../voip/webrtc/api/test/mock_video_track.h | 69 + .../test/network_emulation/cross_traffic.h | 4 +- .../network_emulation_interfaces.h | 12 +- .../api/test/network_emulation_manager.h | 41 +- .../api/test/peer_network_dependencies.h | 32 + .../peerconnection_quality_test_fixture.h | 208 +- .../api/test/stats_observer_interface.h | 2 +- .../voip/webrtc/api/test/time_controller.h | 14 +- .../api/test/track_id_stream_info_map.h | 6 +- .../video/function_video_encoder_factory.h | 2 +- .../test/video_quality_analyzer_interface.h | 42 +- .../api/test/video_quality_test_fixture.h | 2 +- .../webrtc/api/test/videocodec_test_fixture.h | 10 +- .../webrtc/api/test/videocodec_test_stats.cc | 141 +- .../webrtc/api/test/videocodec_test_stats.h | 14 + .../webrtc/api/transport/bitrate_settings.h | 2 +- .../data_channel_transport_interface.h | 17 +- .../jni/voip/webrtc/api/transport/enums.h | 10 + .../webrtc/api/transport/network_types.cc | 10 +- .../voip/webrtc/api/transport/network_types.h | 5 +- .../transport/rtp/dependency_descriptor.cc | 2 +- .../api/transport/rtp/dependency_descriptor.h | 21 +- .../sctp_transport_factory_interface.h | 2 +- .../jni/voip/webrtc/api/transport/stun.cc | 12 +- .../jni/voip/webrtc/api/transport/stun.h | 17 +- .../jni/voip/webrtc/api/turn_customizer.h | 2 +- .../jni/voip/webrtc/api/uma_metrics.h | 11 + .../jni/voip/webrtc/api/video/color_space.cc | 4 +- .../voip/webrtc/api/video/encoded_frame.cc | 2 +- .../jni/voip/webrtc/api/video/encoded_frame.h | 2 +- .../jni/voip/webrtc/api/video/encoded_image.h | 16 +- .../jni/voip/webrtc/api/video/i010_buffer.cc | 33 +- .../jni/voip/webrtc/api/video/i010_buffer.h | 14 +- .../jni/voip/webrtc/api/video/i420_buffer.cc | 31 - 
.../jni/voip/webrtc/api/video/i420_buffer.h | 14 +- .../jni/voip/webrtc/api/video/i444_buffer.cc | 211 + .../jni/voip/webrtc/api/video/i444_buffer.h | 104 + .../jni/voip/webrtc/api/video/nv12_buffer.cc | 3 +- .../jni/voip/webrtc/api/video/nv12_buffer.h | 4 +- .../voip/webrtc/api/video/render_resolution.h | 45 + .../api/video/rtp_video_frame_assembler.cc | 339 ++ .../api/video/rtp_video_frame_assembler.h | 76 + .../test/mock_recordable_encoded_frame.h | 34 - .../api/video/video_bitrate_allocation.h | 4 +- .../voip/webrtc/api/video/video_codec_type.h | 4 +- .../jni/voip/webrtc/api/video/video_frame.h | 2 +- .../webrtc/api/video/video_frame_buffer.cc | 18 +- .../webrtc/api/video/video_frame_buffer.h | 15 +- .../webrtc/api/video/video_sink_interface.h | 7 + .../webrtc/api/video/video_source_interface.h | 10 +- .../api/video/video_stream_decoder_create.h | 2 +- .../video/video_stream_encoder_interface.h | 24 +- .../api/video/video_stream_encoder_observer.h | 2 +- .../api/video/video_stream_encoder_settings.h | 16 +- .../jni/voip/webrtc/api/video/video_timing.cc | 9 + .../jni/voip/webrtc/api/video/video_timing.h | 7 +- .../jni/voip/webrtc/api/video_codecs/OWNERS | 4 - .../builtin_video_encoder_factory.cc | 9 - .../api/video_codecs/h264_profile_level_id.cc | 6 +- .../api/video_codecs/h264_profile_level_id.h | 1 + .../webrtc/api/video_codecs/video_codec.cc | 29 +- .../webrtc/api/video_codecs/video_codec.h | 13 +- .../webrtc/api/video_codecs/video_decoder.cc | 9 + .../webrtc/api/video_codecs/video_decoder.h | 65 +- .../api/video_codecs/video_decoder_factory.h | 25 +- ...video_decoder_software_fallback_wrapper.cc | 53 +- .../webrtc/api/video_codecs/video_encoder.cc | 19 +- .../webrtc/api/video_codecs/video_encoder.h | 39 +- .../api/video_codecs/video_encoder_config.cc | 11 +- .../api/video_codecs/video_encoder_config.h | 8 +- .../api/video_codecs/video_encoder_factory.h | 22 +- ...video_encoder_software_fallback_wrapper.cc | 34 +- .../video_encoder_software_fallback_wrapper.h | 2 +- .../vp8_frame_buffer_controller.h | 24 +- .../api/video_codecs/vp8_frame_config.cc | 4 +- .../webrtc/api/video_codecs/vp9_profile.cc | 1 - .../api/video_track_source_constraints.h | 32 + .../api/video_track_source_proxy_factory.h | 28 + .../jni/voip/webrtc/api/voip/voip_base.h | 26 +- .../jni/voip/webrtc/api/voip/voip_codec.h | 4 +- .../jni/voip/webrtc/api/voip/voip_dtmf.h | 6 +- .../webrtc/api/voip/voip_engine_factory.cc | 2 +- .../jni/voip/webrtc/api/voip/voip_network.h | 4 +- .../voip/webrtc/api/voip/voip_statistics.h | 8 +- .../webrtc/api/voip/voip_volume_control.h | 12 +- .../webrtc/api/wrapping_async_dns_resolver.cc | 31 + .../webrtc/api/wrapping_async_dns_resolver.h | 117 + TMessagesProj/jni/voip/webrtc/audio/OWNERS | 3 - .../voip/webrtc/audio/audio_receive_stream.cc | 202 +- .../voip/webrtc/audio/audio_receive_stream.h | 70 +- .../voip/webrtc/audio/audio_send_stream.cc | 70 +- .../jni/voip/webrtc/audio/audio_send_stream.h | 9 +- .../jni/voip/webrtc/audio/audio_state.cc | 4 +- .../jni/voip/webrtc/audio/audio_state.h | 4 +- .../voip/webrtc/audio/audio_transport_impl.cc | 50 +- .../voip/webrtc/audio/audio_transport_impl.h | 42 +- .../jni/voip/webrtc/audio/channel_receive.cc | 299 +- .../jni/voip/webrtc/audio/channel_receive.h | 14 +- ...nnel_receive_frame_transformer_delegate.cc | 26 +- ...annel_receive_frame_transformer_delegate.h | 14 +- .../jni/voip/webrtc/audio/channel_send.cc | 84 +- .../jni/voip/webrtc/audio/channel_send.h | 11 +- ...channel_send_frame_transformer_delegate.cc | 37 +- 
.../channel_send_frame_transformer_delegate.h | 14 +- .../webrtc/audio/mock_voe_channel_proxy.h | 11 +- .../voip/webrtc/audio/null_audio_poller.cc | 2 +- .../jni/voip/webrtc/audio/remix_resample.h | 12 +- .../audio/utility/audio_frame_operations.cc | 12 +- .../audio/utility/audio_frame_operations.h | 54 +- .../webrtc/audio/utility/channel_mixer.cc | 2 +- .../voip/webrtc/audio/utility/channel_mixer.h | 8 +- .../audio/utility/channel_mixing_matrix.cc | 4 +- .../audio/utility/channel_mixing_matrix.h | 12 +- .../voip/webrtc/audio/voip/audio_channel.cc | 13 +- .../voip/webrtc/audio/voip/audio_channel.h | 5 - .../jni/voip/webrtc/audio/voip/audio_egress.h | 6 +- .../webrtc/audio/voip/test/mock_task_queue.h | 60 - .../jni/voip/webrtc/audio/voip/voip_core.cc | 30 +- .../jni/voip/webrtc/audio/voip/voip_core.h | 19 +- TMessagesProj/jni/voip/webrtc/call/OWNERS | 2 + .../jni/voip/webrtc/call/adaptation/OWNERS | 2 +- .../adaptation/broadcast_resource_listener.cc | 3 +- .../resource_adaptation_processor.cc | 52 +- .../resource_adaptation_processor.h | 5 +- .../resource_adaptation_processor_interface.h | 2 - .../adaptation/video_source_restrictions.h | 4 +- .../call/adaptation/video_stream_adapter.cc | 45 +- .../call/adaptation/video_stream_adapter.h | 4 +- .../voip/webrtc/call/audio_receive_stream.h | 54 +- .../jni/voip/webrtc/call/audio_send_stream.cc | 4 +- .../jni/voip/webrtc/call/audio_send_stream.h | 2 + .../jni/voip/webrtc/call/bitrate_allocator.cc | 8 +- .../jni/voip/webrtc/call/bitrate_allocator.h | 14 +- TMessagesProj/jni/voip/webrtc/call/call.cc | 1125 ++--- TMessagesProj/jni/voip/webrtc/call/call.h | 39 +- .../jni/voip/webrtc/call/call_config.cc | 13 + .../jni/voip/webrtc/call/call_config.h | 11 +- .../jni/voip/webrtc/call/call_factory.cc | 136 +- .../jni/voip/webrtc/call/call_perf_tests.cc | 187 +- .../jni/voip/webrtc/call/degraded_call.cc | 98 +- .../jni/voip/webrtc/call/degraded_call.h | 38 +- .../jni/voip/webrtc/call/fake_network_pipe.cc | 3 +- .../jni/voip/webrtc/call/fake_network_pipe.h | 16 +- .../voip/webrtc/call/flexfec_receive_stream.h | 19 +- .../call/flexfec_receive_stream_impl.cc | 85 +- .../webrtc/call/flexfec_receive_stream_impl.h | 43 +- .../jni/voip/webrtc/call/packet_receiver.h | 33 - .../jni/voip/webrtc/call/rampup_tests.cc | 53 +- .../jni/voip/webrtc/call/rampup_tests.h | 4 +- .../jni/voip/webrtc/call/receive_stream.h | 93 + .../webrtc/call/rtp_bitrate_configurator.h | 11 +- .../jni/voip/webrtc/call/rtp_config.cc | 2 +- .../jni/voip/webrtc/call/rtp_config.h | 4 +- .../jni/voip/webrtc/call/rtp_demuxer.cc | 105 +- .../jni/voip/webrtc/call/rtp_demuxer.h | 59 +- .../voip/webrtc/call/rtp_payload_params.cc | 30 +- .../jni/voip/webrtc/call/rtp_payload_params.h | 20 +- .../voip/webrtc/call/rtp_transport_config.h | 50 + .../call/rtp_transport_controller_send.cc | 75 +- .../call/rtp_transport_controller_send.h | 30 +- .../rtp_transport_controller_send_factory.h | 37 + ...nsport_controller_send_factory_interface.h | 32 + .../rtp_transport_controller_send_interface.h | 2 +- .../jni/voip/webrtc/call/rtp_video_sender.cc | 166 +- .../jni/voip/webrtc/call/rtp_video_sender.h | 37 +- .../webrtc/call/rtp_video_sender_interface.h | 6 +- .../jni/voip/webrtc/call/simulated_network.cc | 4 +- .../jni/voip/webrtc/call/simulated_network.h | 2 +- TMessagesProj/jni/voip/webrtc/call/version.cc | 2 +- .../voip/webrtc/call/video_receive_stream.cc | 14 +- .../voip/webrtc/call/video_receive_stream.h | 71 +- .../jni/voip/webrtc/call/video_send_stream.cc | 3 +- .../jni/voip/webrtc/call/video_send_stream.h | 
24 +- .../webrtc/common_audio/audio_converter.h | 13 +- .../voip/webrtc/common_audio/channel_buffer.h | 26 +- .../jni/voip/webrtc/common_audio/fir_filter.h | 4 +- .../webrtc/common_audio/fir_filter_avx2.cc | 2 +- .../voip/webrtc/common_audio/fir_filter_c.cc | 2 +- .../webrtc/common_audio/fir_filter_factory.cc | 2 +- .../webrtc/common_audio/fir_filter_factory.h | 2 +- .../webrtc/common_audio/fir_filter_neon.cc | 2 +- .../webrtc/common_audio/fir_filter_sse.cc | 2 +- .../webrtc/common_audio/include/audio_util.h | 24 +- .../voip/webrtc/common_audio/real_fourier.h | 2 +- .../resampler/push_sinc_resampler.cc | 4 +- .../resampler/push_sinc_resampler.h | 14 +- .../common_audio/resampler/resampler.cc | 1 - .../common_audio/resampler/sinc_resampler.cc | 19 +- .../common_audio/resampler/sinc_resampler.h | 36 +- .../resampler/sinc_resampler_avx2.cc | 2 +- .../resampler/sinc_resampler_sse.cc | 2 +- .../sinusoidal_linear_chirp_source.h | 9 +- .../voip/webrtc/common_audio/ring_buffer.c | 10 +- .../voip/webrtc/common_audio/ring_buffer.h | 18 +- .../dot_product_with_scale.h | 2 +- .../signal_processing/include/real_fft.h | 2 +- .../include/signal_processing_library.h | 200 +- .../signal_processing/splitting_filter.c | 30 +- .../webrtc/common_audio/smoothing_filter.cc | 12 +- .../webrtc/common_audio/smoothing_filter.h | 6 +- .../spl_sqrt_floor/spl_sqrt_floor.h | 4 +- .../common_audio/vad/include/webrtc_vad.h | 4 +- .../jni/voip/webrtc/common_audio/vad/vad.cc | 2 +- .../voip/webrtc/common_audio/vad/vad_core.c | 46 +- .../voip/webrtc/common_audio/vad/vad_core.h | 8 +- .../webrtc/common_audio/vad/vad_filterbank.c | 98 +- .../webrtc/common_audio/vad/vad_filterbank.h | 8 +- .../voip/webrtc/common_audio/vad/vad_gmm.c | 28 +- .../voip/webrtc/common_audio/vad/vad_gmm.h | 10 +- .../jni/voip/webrtc/common_audio/vad/vad_sp.c | 14 +- .../jni/voip/webrtc/common_audio/vad/vad_sp.h | 6 +- .../voip/webrtc/common_audio/wav_header.cc | 2 +- .../jni/voip/webrtc/common_video/OWNERS | 3 - .../common_video/frame_rate_estimator.h | 2 +- .../common_video/framerate_controller.cc | 88 + .../common_video/framerate_controller.h | 46 + .../h264/h264_bitstream_parser.cc | 145 +- .../webrtc/common_video/h264/pps_parser.cc | 162 +- .../webrtc/common_video/h264/pps_parser.h | 16 +- .../webrtc/common_video/h264/prefix_parser.cc | 1 + .../webrtc/common_video/h264/sps_parser.cc | 146 +- .../webrtc/common_video/h264/sps_parser.h | 9 +- .../common_video/h264/sps_vui_rewriter.cc | 296 +- .../common_video/h264/sps_vui_rewriter.h | 2 +- .../h265/h265_bitstream_parser.cc | 73 +- .../common_video/h265/h265_pps_parser.cc | 81 +- .../common_video/h265/h265_sps_parser.cc | 109 +- .../common_video/h265/h265_vps_parser.cc | 3 +- .../common_video/h265/legacy_bit_buffer.cc | 254 ++ .../common_video/h265/legacy_bit_buffer.h | 105 + .../include/video_frame_buffer_pool.h | 4 +- .../common_video/incoming_video_stream.cc | 3 +- .../libyuv/include/webrtc_libyuv.h | 6 +- .../common_video/libyuv/webrtc_libyuv.cc | 4 +- .../common_video/video_frame_buffer_pool.cc | 44 +- .../rtc_event_log/encoder/bit_writer.cc | 49 + .../rtc_event_log/encoder/bit_writer.h | 61 + .../rtc_event_log/encoder/blob_encoding.h | 8 +- .../rtc_event_log/encoder/delta_encoding.cc | 248 +- .../rtc_event_log/encoder/delta_encoding.h | 8 +- .../encoder/rtc_event_log_encoder_common.h | 4 +- .../encoder/rtc_event_log_encoder_legacy.cc | 46 +- .../rtc_event_log_encoder_new_format.cc | 52 +- .../encoder/rtc_event_log_encoder_v3.cc | 164 + .../encoder/rtc_event_log_encoder_v3.h | 46 + 
.../logging/rtc_event_log/encoder/var_int.cc | 14 +- .../logging/rtc_event_log/encoder/var_int.h | 14 +- .../fixed_length_encoding_parameters_v3.cc | 137 + .../fixed_length_encoding_parameters_v3.h | 96 + .../rtc_event_log/events/logged_rtp_rtcp.h | 259 ++ .../events/rtc_event_alr_state.cc | 10 + .../events/rtc_event_alr_state.h | 47 +- .../rtc_event_audio_network_adaptation.h | 45 +- .../events/rtc_event_audio_playout.cc | 5 + .../events/rtc_event_audio_playout.h | 54 +- .../rtc_event_audio_receive_stream_config.h | 43 +- .../rtc_event_audio_send_stream_config.h | 41 +- .../events/rtc_event_begin_log.cc | 73 + .../events/rtc_event_begin_log.h | 74 + .../rtc_event_bwe_update_delay_based.cc | 6 + .../events/rtc_event_bwe_update_delay_based.h | 103 +- .../events/rtc_event_bwe_update_loss_based.h | 59 +- .../events/rtc_event_definition.h | 152 + .../events/rtc_event_dtls_transport_state.h | 35 +- .../events/rtc_event_dtls_writable_state.h | 41 +- .../rtc_event_log/events/rtc_event_end_log.cc | 58 + .../rtc_event_log/events/rtc_event_end_log.h | 64 + .../events/rtc_event_field_encoding.cc | 299 ++ .../events/rtc_event_field_encoding.h | 179 + .../events/rtc_event_field_encoding_parser.cc | 398 ++ .../events/rtc_event_field_encoding_parser.h | 285 ++ .../events/rtc_event_field_extraction.cc | 60 + .../events/rtc_event_field_extraction.h | 84 + .../events/rtc_event_frame_decoded.h | 46 +- .../events/rtc_event_generic_ack_received.h | 68 +- .../rtc_event_generic_packet_received.h | 53 +- .../events/rtc_event_generic_packet_sent.h | 70 +- .../events/rtc_event_ice_candidate_pair.h | 59 +- .../rtc_event_ice_candidate_pair_config.h | 51 +- .../events/rtc_event_probe_cluster_created.h | 65 +- .../events/rtc_event_probe_result_failure.h | 49 +- .../events/rtc_event_probe_result_success.h | 49 +- .../events/rtc_event_remote_estimate.h | 40 +- .../events/rtc_event_route_change.h | 47 +- .../events/rtc_event_rtcp_packet_incoming.h | 18 + .../events/rtc_event_rtcp_packet_outgoing.h | 18 + .../events/rtc_event_rtp_packet_incoming.h | 19 + .../events/rtc_event_rtp_packet_outgoing.h | 21 +- .../rtc_event_video_receive_stream_config.h | 43 +- .../rtc_event_video_send_stream_config.h | 42 +- .../logging/rtc_event_log/logged_events.cc | 57 - .../logging/rtc_event_log/logged_events.h | 334 +- .../rtc_event_log/rtc_event_log2rtp_dump.cc | 4 +- .../rtc_event_log/rtc_event_log_impl.cc | 32 +- .../rtc_event_log/rtc_event_log_impl.h | 4 +- .../rtc_event_log/rtc_event_log_parser.cc | 609 ++- .../rtc_event_log/rtc_event_log_parser.h | 402 +- .../rtc_event_log_unittest_helper.cc | 1352 ++++++ .../rtc_event_log_unittest_helper.h | 330 ++ .../rtc_event_log/rtc_event_processor.cc | 2 +- .../rtc_event_log/rtc_event_processor.h | 4 +- .../media/base/adapted_video_track_source.cc | 5 + .../media/base/adapted_video_track_source.h | 4 +- .../jni/voip/webrtc/media/base/codec.cc | 19 +- .../jni/voip/webrtc/media/base/codec.h | 16 +- .../jni/voip/webrtc/media/base/delayable.h | 2 +- .../webrtc/media/base/fake_frame_source.cc | 3 +- .../webrtc/media/base/fake_media_engine.h | 2 +- .../media/base/fake_network_interface.h | 36 +- .../jni/voip/webrtc/media/base/fake_rtp.cc | 2 +- .../jni/voip/webrtc/media/base/fake_rtp.h | 2 +- .../media/base/h264_profile_level_id.cc | 43 - .../webrtc/media/base/h264_profile_level_id.h | 85 - .../voip/webrtc/media/base/media_channel.cc | 17 +- .../voip/webrtc/media/base/media_channel.h | 91 +- .../voip/webrtc/media/base/media_constants.cc | 5 +- .../voip/webrtc/media/base/media_constants.h | 3 +- 
.../voip/webrtc/media/base/media_engine.cc | 9 - .../jni/voip/webrtc/media/base/media_engine.h | 9 +- .../jni/voip/webrtc/media/base/rtp_utils.cc | 180 +- .../jni/voip/webrtc/media/base/rtp_utils.h | 26 +- .../media/base/sdp_video_format_utils.h | 14 +- .../voip/webrtc/media/base/stream_params.cc | 5 +- .../voip/webrtc/media/base/stream_params.h | 32 +- .../jni/voip/webrtc/media/base/test_utils.h | 8 +- .../voip/webrtc/media/base/video_adapter.cc | 54 +- .../voip/webrtc/media/base/video_adapter.h | 48 +- .../webrtc/media/base/video_broadcaster.cc | 21 +- .../webrtc/media/base/video_broadcaster.h | 13 + .../jni/voip/webrtc/media/base/video_common.h | 4 +- .../webrtc/media/base/video_source_base.cc | 49 + .../webrtc/media/base/video_source_base.h | 36 +- .../media/engine/encoder_simulcast_proxy.cc | 5 +- .../media/engine/encoder_simulcast_proxy.h | 4 - .../webrtc/media/engine/fake_webrtc_call.cc | 66 +- .../webrtc/media/engine/fake_webrtc_call.h | 46 +- .../media/engine/fake_webrtc_video_engine.cc | 39 +- .../media/engine/fake_webrtc_video_engine.h | 5 +- .../media/engine/internal_decoder_factory.cc | 59 +- .../media/engine/internal_decoder_factory.h | 2 + .../media/engine/internal_encoder_factory.cc | 38 +- .../media/engine/internal_encoder_factory.h | 6 +- .../media/engine/multiplex_codec_factory.h | 4 +- .../media/engine/payload_type_mapper.cc | 7 + .../webrtc/media/engine/payload_type_mapper.h | 4 +- .../jni/voip/webrtc/media/engine/simulcast.cc | 18 +- .../jni/voip/webrtc/media/engine/simulcast.h | 6 +- .../media/engine/simulcast_encoder_adapter.cc | 149 +- .../media/engine/simulcast_encoder_adapter.h | 35 +- .../media/engine/unhandled_packets_buffer.cc | 2 +- .../media/engine/unhandled_packets_buffer.h | 2 +- .../media/engine/webrtc_media_engine.cc | 48 +- .../webrtc/media/engine/webrtc_media_engine.h | 7 +- .../media/engine/webrtc_video_engine.cc | 584 +-- .../webrtc/media/engine/webrtc_video_engine.h | 46 +- .../media/engine/webrtc_voice_engine.cc | 431 +- .../webrtc/media/engine/webrtc_voice_engine.h | 12 +- .../webrtc/media/sctp/dcsctp_transport.cc | 143 +- .../voip/webrtc/media/sctp/dcsctp_transport.h | 9 +- .../media/sctp/sctp_transport_factory.cc | 9 +- .../media/sctp/sctp_transport_factory.h | 2 +- .../media/sctp/sctp_transport_internal.h | 36 +- .../webrtc/media/sctp/usrsctp_transport.cc | 148 +- .../webrtc/media/sctp/usrsctp_transport.h | 22 +- .../async_audio_processing.h | 10 +- .../voip/webrtc/modules/audio_coding/OWNERS | 1 + .../modules/audio_coding/acm2/acm_receiver.cc | 38 +- .../modules/audio_coding/acm2/acm_receiver.h | 10 +- .../audio_coding/acm2/acm_resampler.cc | 3 +- .../audio_coding/acm2/audio_coding_module.cc | 31 +- .../audio_coding/acm2/call_statistics.cc | 4 +- .../audio_coding/acm2/call_statistics.h | 4 +- .../audio_network_adaptor_impl.h | 6 +- .../bitrate_controller.cc | 2 +- .../bitrate_controller.h | 5 +- .../channel_controller.cc | 4 +- .../channel_controller.h | 5 +- .../audio_network_adaptor/config.proto | 21 +- .../controller_manager.cc | 12 +- .../controller_manager.h | 8 +- .../audio_network_adaptor/debug_dump.proto | 2 +- .../debug_dump_writer.cc | 2 +- .../audio_network_adaptor/debug_dump_writer.h | 1 - .../audio_network_adaptor/dtx_controller.cc | 2 +- .../audio_network_adaptor/dtx_controller.h | 5 +- .../audio_network_adaptor/event_log_writer.h | 6 +- .../fec_controller_plr_based.cc | 2 +- .../fec_controller_plr_based.h | 10 +- .../frame_length_controller.cc | 20 +- .../frame_length_controller.h | 6 +- 
.../include/audio_network_adaptor_config.h | 2 +- .../audio_coding/codecs/cng/webrtc_cng.cc | 10 +- .../audio_coding/codecs/cng/webrtc_cng.h | 18 +- .../codecs/g711/audio_decoder_pcm.cc | 16 +- .../codecs/g711/audio_decoder_pcm.h | 11 +- .../codecs/g711/audio_encoder_pcm.h | 9 +- .../codecs/g722/audio_decoder_g722.cc | 6 + .../codecs/g722/audio_decoder_g722.h | 19 +- .../codecs/g722/audio_encoder_g722.h | 5 +- .../codecs/ilbc/audio_decoder_ilbc.h | 6 +- .../codecs/ilbc/audio_encoder_ilbc.h | 5 +- .../codecs/ilbc/create_augmented_vec.c | 10 +- .../audio_coding/codecs/ilbc/get_cd_vec.c | 2 +- .../codecs/isac/audio_decoder_isac_t.h | 6 +- .../codecs/isac/audio_encoder_isac_t.h | 6 +- .../codecs/isac/fix/source/arith_routins.h | 10 +- .../isac/fix/source/bandwidth_estimator.h | 28 +- .../codecs/isac/fix/source/entropy_coding.c | 16 +- .../codecs/isac/fix/source/entropy_coding.h | 80 +- .../isac/fix/source/filterbank_internal.h | 8 +- .../codecs/isac/fix/source/filters.c | 2 +- .../codecs/isac/fix/source/isacfix.c | 10 +- .../isac/fix/source/lpc_masking_model.c | 16 +- .../codecs/isac/main/include/isac.h | 2 +- .../codecs/isac/main/source/arith_routines.h | 18 +- .../isac/main/source/bandwidth_estimator.h | 24 +- .../codecs/isac/main/source/entropy_coding.c | 2 +- .../codecs/isac/main/source/pitch_filter.c | 16 +- .../codecs/legacy_encoded_audio_frame.cc | 2 +- .../audio_decoder_multi_channel_opus_impl.cc | 4 + .../audio_decoder_multi_channel_opus_impl.h | 7 +- .../codecs/opus/audio_decoder_opus.h | 5 +- .../audio_encoder_multi_channel_opus_impl.cc | 4 + .../audio_encoder_multi_channel_opus_impl.h | 7 +- .../codecs/opus/audio_encoder_opus.cc | 16 +- .../codecs/opus/audio_encoder_opus.h | 7 +- .../audio_coding/codecs/opus/opus_fec_test.cc | 4 +- .../codecs/opus/opus_interface.cc | 22 +- .../codecs/pcm16b/audio_decoder_pcm16b.cc | 7 +- .../codecs/pcm16b/audio_decoder_pcm16b.h | 6 +- .../codecs/pcm16b/audio_encoder_pcm16b.h | 7 +- .../codecs/red/audio_encoder_copy_red.cc | 52 +- .../codecs/red/audio_encoder_copy_red.h | 6 +- .../codecs/tools/audio_codec_speed_test.cc | 5 +- .../codecs/tools/audio_codec_speed_test.h | 12 +- .../include/audio_coding_module.h | 10 +- .../include/audio_coding_module_typedefs.h | 9 +- .../modules/audio_coding/neteq/accelerate.cc | 11 +- .../modules/audio_coding/neteq/accelerate.h | 17 +- .../audio_coding/neteq/audio_multi_vector.cc | 33 +- .../audio_coding/neteq/audio_multi_vector.h | 61 +- .../audio_coding/neteq/audio_vector.cc | 17 +- .../modules/audio_coding/neteq/audio_vector.h | 58 +- .../audio_coding/neteq/background_noise.cc | 37 +- .../audio_coding/neteq/background_noise.h | 28 +- .../audio_coding/neteq/buffer_level_filter.cc | 8 +- .../audio_coding/neteq/buffer_level_filter.h | 12 +- .../audio_coding/neteq/comfort_noise.cc | 9 +- .../audio_coding/neteq/comfort_noise.h | 12 +- .../audio_coding/neteq/cross_correlation.h | 12 +- .../audio_coding/neteq/decision_logic.cc | 25 +- .../audio_coding/neteq/decision_logic.h | 32 +- .../audio_coding/neteq/decoder_database.cc | 8 +- .../audio_coding/neteq/decoder_database.h | 38 +- .../audio_coding/neteq/delay_manager.cc | 258 +- .../audio_coding/neteq/delay_manager.h | 95 +- .../modules/audio_coding/neteq/dsp_helper.cc | 9 +- .../modules/audio_coding/neteq/dsp_helper.h | 66 +- .../modules/audio_coding/neteq/dtmf_buffer.cc | 8 +- .../modules/audio_coding/neteq/dtmf_buffer.h | 23 +- .../audio_coding/neteq/dtmf_tone_generator.cc | 4 +- .../audio_coding/neteq/dtmf_tone_generator.h | 7 +- 
.../modules/audio_coding/neteq/expand.cc | 66 +- .../modules/audio_coding/neteq/expand.h | 19 +- .../audio_coding/neteq/expand_uma_logger.h | 6 +- .../modules/audio_coding/neteq/histogram.cc | 38 +- .../modules/audio_coding/neteq/histogram.h | 8 +- .../modules/audio_coding/neteq/merge.cc | 49 +- .../webrtc/modules/audio_coding/neteq/merge.h | 34 +- .../neteq/mock/mock_delay_manager.h | 20 +- .../audio_coding/neteq/nack_tracker.cc | 147 +- .../modules/audio_coding/neteq/nack_tracker.h | 110 +- .../modules/audio_coding/neteq/neteq_impl.cc | 291 +- .../modules/audio_coding/neteq/neteq_impl.h | 55 +- .../modules/audio_coding/neteq/normal.cc | 6 +- .../modules/audio_coding/neteq/normal.h | 16 +- .../modules/audio_coding/neteq/packet.h | 4 +- .../audio_coding/neteq/packet_buffer.cc | 10 +- .../audio_coding/neteq/packet_buffer.h | 21 +- .../audio_coding/neteq/post_decode_vad.h | 10 +- .../audio_coding/neteq/preemptive_expand.cc | 8 +- .../audio_coding/neteq/preemptive_expand.h | 14 +- .../audio_coding/neteq/random_vector.h | 7 +- .../neteq/red_payload_splitter.cc | 23 +- .../audio_coding/neteq/red_payload_splitter.h | 15 +- .../neteq/relative_arrival_delay_tracker.cc | 83 + .../neteq/relative_arrival_delay_tracker.h | 61 + .../audio_coding/neteq/reorder_optimizer.cc | 75 + .../audio_coding/neteq/reorder_optimizer.h | 43 + .../neteq/statistics_calculator.cc | 5 +- .../neteq/statistics_calculator.h | 40 +- .../modules/audio_coding/neteq/sync_buffer.cc | 12 +- .../modules/audio_coding/neteq/sync_buffer.h | 42 +- .../audio_coding/neteq/time_stretch.cc | 47 +- .../modules/audio_coding/neteq/time_stretch.h | 21 +- .../audio_coding/neteq/timestamp_scaler.cc | 2 +- .../audio_coding/neteq/timestamp_scaler.h | 14 +- .../audio_coding/neteq/underrun_optimizer.cc | 71 + .../audio_coding/neteq/underrun_optimizer.h | 50 + .../audio_device/android/aaudio_player.cc | 34 +- .../audio_device/android/aaudio_player.h | 4 +- .../audio_device/android/aaudio_recorder.cc | 44 +- .../audio_device/android/aaudio_recorder.h | 4 +- .../audio_device/android/aaudio_wrapper.cc | 52 +- .../android/audio_device_template.h | 88 +- .../audio_device/android/audio_manager.cc | 29 +- .../android/audio_merged_screen_record_jni.cc | 36 +- .../audio_device/android/audio_record_jni.cc | 44 +- .../audio_device/android/audio_record_jni.h | 13 +- .../android/audio_screen_record_jni.cc | 36 +- .../audio_device/android/audio_track_jni.cc | 30 +- .../audio_device/android/audio_track_jni.h | 12 +- .../modules/audio_device/android/build_info.h | 2 +- .../audio_device/android/opensles_player.h | 4 +- .../audio_device/android/opensles_recorder.h | 4 +- .../audio_device/audio_device_buffer.cc | 119 +- .../audio_device/audio_device_buffer.h | 13 + .../audio_device_data_observer.cc | 40 +- .../modules/audio_device/audio_device_impl.cc | 252 +- .../audio_device/dummy/file_audio_device.h | 4 +- .../modules/audio_device/fine_audio_buffer.cc | 18 +- .../modules/audio_device/fine_audio_buffer.h | 16 +- .../include/audio_device_data_observer.h | 20 +- .../include/audio_device_defines.h | 45 +- .../include/audio_device_factory.cc | 4 +- .../include/audio_device_factory.h | 2 +- .../include/mock_audio_transport.h | 39 +- .../audio_device/include/test_audio_device.cc | 10 +- .../audio_device/include/test_audio_device.h | 18 +- .../linux/audio_device_alsa_linux.cc | 13 +- .../linux/audio_device_alsa_linux.h | 8 +- .../linux/audio_device_pulse_linux.cc | 2 +- .../linux/latebindingsymboltable_linux.h | 14 +- .../modules/audio_mixer/audio_mixer_impl.h | 6 +- 
.../default_output_rate_calculator.h | 4 +- .../aec3/adaptive_fir_filter.cc | 32 +- .../aec3/adaptive_fir_filter_avx2.cc | 16 +- .../audio_processing/aec3/aec3_common.cc | 8 +- .../audio_processing/aec3/aec3_common.h | 4 +- .../modules/audio_processing/aec3/aec3_fft.cc | 8 +- .../modules/audio_processing/aec3/aec3_fft.h | 6 +- .../audio_processing/aec3/aec_state.cc | 5 +- .../modules/audio_processing/aec3/aec_state.h | 14 +- .../audio_processing/aec3/alignment_mixer.h | 2 +- .../aec3/block_delay_buffer.cc | 17 +- .../aec3/block_processor_metrics.h | 7 +- .../aec3/comfort_noise_generator.h | 1 - .../modules/audio_processing/aec3/decimator.h | 6 +- .../audio_processing/aec3/echo_audibility.h | 1 - .../audio_processing/aec3/echo_canceller3.cc | 56 +- .../aec3/echo_path_delay_estimator.h | 6 +- .../audio_processing/aec3/echo_remover.cc | 14 +- .../aec3/echo_remover_metrics.cc | 2 +- .../aec3/echo_remover_metrics.h | 6 +- .../audio_processing/aec3/erl_estimator.h | 5 +- .../audio_processing/aec3/erle_estimator.h | 12 + .../audio_processing/aec3/filter_analyzer.cc | 21 +- .../audio_processing/aec3/filter_analyzer.h | 1 - .../aec3/fullband_erle_estimator.h | 2 +- .../audio_processing/aec3/matched_filter.cc | 65 +- .../audio_processing/aec3/matched_filter.h | 3 + .../aec3/matched_filter_avx2.cc | 14 +- .../audio_processing/aec3/render_buffer.cc | 17 +- .../aec3/render_delay_controller_metrics.h | 7 +- .../aec3/render_signal_analyzer.h | 6 +- .../aec3/residual_echo_estimator.cc | 44 +- .../aec3/residual_echo_estimator.h | 15 +- .../aec3/reverb_decay_estimator.cc | 5 +- .../aec3/reverb_decay_estimator.h | 12 +- .../aec3/reverb_frequency_response.cc | 14 +- .../aec3/reverb_frequency_response.h | 4 +- .../aec3/reverb_model_estimator.cc | 5 +- .../aec3/reverb_model_estimator.h | 9 +- .../aec3/subband_erle_estimator.cc | 11 + .../aec3/subband_erle_estimator.h | 7 + .../aec3/suppression_filter.cc | 31 +- .../aec3/suppression_filter.h | 6 +- .../audio_processing/aec3/suppression_gain.cc | 59 +- .../audio_processing/aec3/suppression_gain.h | 14 +- .../audio_processing/aec3/vector_math.h | 6 + .../aec_dump/aec_dump_factory.h | 6 +- .../aec_dump/aec_dump_impl.cc | 4 +- .../aec_dump/capture_stream_info.cc | 4 +- .../aec_dump/write_to_file_task.cc | 4 +- .../audio_processing/aecm/aecm_core.cc | 14 +- .../modules/audio_processing/aecm/aecm_core.h | 18 +- .../audio_processing/aecm/aecm_core_c.cc | 14 +- .../audio_processing/aecm/aecm_core_mips.cc | 12 +- .../modules/audio_processing/agc/agc.cc | 16 +- .../webrtc/modules/audio_processing/agc/agc.h | 7 +- .../agc/agc_manager_direct.cc | 231 +- .../audio_processing/agc/agc_manager_direct.h | 69 +- .../agc/analog_gain_stats_reporter.cc | 130 + .../agc/analog_gain_stats_reporter.h | 67 + .../agc/clipping_predictor.cc | 383 ++ .../audio_processing/agc/clipping_predictor.h | 63 + .../agc/clipping_predictor_evaluator.cc | 214 + .../agc/clipping_predictor_evaluator.h | 122 + .../agc/clipping_predictor_level_buffer.cc | 77 + .../agc/clipping_predictor_level_buffer.h | 71 + .../audio_processing/agc/gain_control.h | 14 +- .../audio_processing/agc/legacy/analog_agc.cc | 2 +- .../agc/legacy/digital_agc.cc | 20 +- .../agc/loudness_histogram.cc | 2 +- .../audio_processing/agc/loudness_histogram.h | 8 +- .../modules/audio_processing/agc/mock_agc.h | 6 +- .../audio_processing/agc2/adaptive_agc.cc | 126 - .../agc2/adaptive_digital_gain_applier.cc | 112 +- .../agc2/adaptive_digital_gain_applier.h | 22 +- .../agc2/adaptive_digital_gain_controller.cc | 108 + ...c.h => 
adaptive_digital_gain_controller.h} | 38 +- .../agc2/adaptive_mode_level_estimator.cc | 57 +- .../agc2/adaptive_mode_level_estimator.h | 19 +- .../audio_processing/agc2/agc2_common.h | 22 +- .../agc2/agc2_testing_common.h | 2 +- .../audio_processing/agc2/biquad_filter.cc | 37 +- .../audio_processing/agc2/biquad_filter.h | 52 +- .../agc2/compute_interpolated_gain_curve.cc | 4 +- .../agc2/compute_interpolated_gain_curve.h | 4 +- .../audio_processing/agc2/down_sampler.cc | 99 - .../audio_processing/agc2/down_sampler.h | 42 - .../agc2/fixed_digital_level_estimator.cc | 17 +- .../agc2/fixed_digital_level_estimator.h | 7 +- .../audio_processing/agc2/gain_applier.cc | 11 +- .../audio_processing/agc2/gain_applier.h | 2 +- .../agc2/interpolated_gain_curve.cc | 6 +- .../agc2/interpolated_gain_curve.h | 6 +- .../modules/audio_processing/agc2/limiter.cc | 40 +- .../modules/audio_processing/agc2/limiter.h | 7 +- .../agc2/limiter_db_gain_curve.cc | 2 +- .../agc2/noise_level_estimator.cc | 124 +- .../agc2/noise_level_estimator.h | 4 - .../agc2/noise_spectrum_estimator.cc | 70 - .../agc2/noise_spectrum_estimator.h | 42 - .../agc2/rnn_vad/auto_correlation.cc | 2 +- .../agc2/rnn_vad/auto_correlation.h | 2 +- .../audio_processing/agc2/rnn_vad/common.h | 4 +- .../agc2/rnn_vad/features_extraction.cc | 13 +- .../agc2/rnn_vad/features_extraction.h | 2 +- .../agc2/rnn_vad/lp_residual.cc | 6 +- .../agc2/rnn_vad/lp_residual.h | 6 +- .../agc2/rnn_vad/pitch_search.cc | 2 +- .../agc2/rnn_vad/pitch_search_internal.cc | 36 +- .../agc2/rnn_vad/ring_buffer.h | 4 +- .../audio_processing/agc2/rnn_vad/rnn_fc.cc | 4 +- .../audio_processing/agc2/rnn_vad/rnn_gru.cc | 2 +- .../rnn_vad/spectral_features_internal.cc | 2 +- .../agc2/rnn_vad/spectral_features_internal.h | 14 +- .../agc2/rnn_vad/symmetric_matrix_buffer.h | 6 +- .../agc2/saturation_protector.cc | 11 +- .../agc2/saturation_protector.h | 1 - .../agc2/signal_classifier.cc | 177 - .../audio_processing/agc2/signal_classifier.h | 73 - .../audio_processing/agc2/vad_with_level.cc | 107 - .../audio_processing/agc2/vad_with_level.h | 64 - .../audio_processing/agc2/vad_wrapper.cc | 106 + .../audio_processing/agc2/vad_wrapper.h | 78 + .../modules/audio_processing/audio_buffer.h | 16 +- .../audio_processing_builder_impl.cc | 20 +- .../audio_processing/audio_processing_impl.cc | 304 +- .../audio_processing/audio_processing_impl.h | 75 +- .../webrtc/modules/audio_processing/common.h | 38 - .../echo_control_mobile_impl.cc | 7 +- .../echo_control_mobile_impl.h | 2 +- .../audio_processing/gain_control_impl.cc | 2 +- .../audio_processing/gain_controller2.cc | 156 +- .../audio_processing/gain_controller2.h | 27 +- .../audio_processing/high_pass_filter.cc | 4 +- .../include/audio_frame_proxies.cc | 12 +- .../include/audio_frame_proxies.h | 12 +- .../include/audio_frame_view.h | 25 +- .../include/audio_processing.cc | 196 +- .../include/audio_processing.h | 243 +- .../include/audio_processing_statistics.h | 16 +- .../modules/audio_processing/include/config.h | 131 - .../include/mock_audio_processing.h | 30 +- .../audio_processing/level_estimator.cc | 29 - .../audio_processing/level_estimator.h | 47 - .../audio_processing/ns/suppression_params.cc | 2 +- .../optionally_built_submodule_creators.h | 4 +- .../residual_echo_detector.cc | 8 - .../audio_processing/residual_echo_detector.h | 4 +- .../modules/audio_processing/rms_level.h | 4 +- .../three_band_filter_bank.cc | 36 +- .../audio_processing/three_band_filter_bank.h | 8 +- .../transient/click_annotate.cc | 2 +- 
.../transient/dyadic_decimator.h | 6 +- .../audio_processing/transient/file_utils.h | 46 +- .../transient/moving_moments.h | 8 +- .../transient/transient_detector.cc | 6 +- .../transient/transient_detector.h | 6 +- .../transient/transient_suppressor.h | 22 +- .../transient/transient_suppressor_impl.cc | 20 +- .../transient/transient_suppressor_impl.h | 22 +- .../audio_processing/transient/wpd_node.h | 2 +- .../audio_processing/transient/wpd_tree.h | 2 +- .../audio_processing/typing_detection.h | 12 +- .../utility/cascaded_biquad_filter.cc | 29 +- .../utility/delay_estimator.cc | 146 +- .../utility/delay_estimator.h | 6 +- .../utility/delay_estimator_internal.h | 4 +- .../utility/delay_estimator_wrapper.cc | 28 +- .../utility/delay_estimator_wrapper.h | 38 +- .../audio_processing/utility/pffft_wrapper.h | 8 +- .../webrtc/modules/audio_processing/vad/gmm.h | 10 +- .../audio_processing/vad/pitch_based_vad.h | 2 +- .../audio_processing/vad/pitch_internal.h | 2 +- .../audio_processing/vad/standalone_vad.h | 4 +- .../audio_processing/vad/vad_audio_proc.cc | 4 +- .../vad/vad_circular_buffer.h | 6 +- .../vad/voice_activity_detector.cc | 5 +- .../vad/voice_activity_detector.h | 2 + .../audio_processing/voice_detection.cc | 92 - .../audio_processing/voice_detection.h | 59 - .../modules/congestion_controller/OWNERS | 3 +- ...acknowledged_bitrate_estimator_interface.h | 12 +- .../goog_cc/delay_based_bwe.cc | 66 +- .../goog_cc/delay_based_bwe.h | 1 - .../delay_increase_detector_interface.h | 8 +- .../goog_cc/goog_cc_network_control.cc | 20 +- .../goog_cc/goog_cc_network_control.h | 1 + .../goog_cc/inter_arrival_delta.cc | 4 +- .../goog_cc/inter_arrival_delta.h | 14 +- .../loss_based_bandwidth_estimation.cc | 10 +- .../goog_cc/loss_based_bwe_v2.cc | 755 ++++ .../goog_cc/loss_based_bwe_v2.h | 146 + .../goog_cc/probe_bitrate_estimator.cc | 4 +- .../goog_cc/probe_bitrate_estimator.h | 2 +- .../goog_cc/probe_controller.cc | 16 +- .../goog_cc/probe_controller.h | 8 +- .../goog_cc/send_side_bandwidth_estimation.cc | 77 +- .../goog_cc/send_side_bandwidth_estimation.h | 18 +- .../goog_cc/test/goog_cc_printer.cc | 2 +- .../goog_cc/test/goog_cc_printer.h | 2 +- .../goog_cc/trendline_estimator.cc | 2 +- .../goog_cc/trendline_estimator.h | 6 +- .../receive_side_congestion_controller.h | 2 +- .../pcc/monitor_interval.cc | 5 +- .../congestion_controller/pcc/rtt_tracker.cc | 2 +- .../congestion_controller/remb_throttler.h | 2 +- .../rtp/control_handler.h | 5 +- .../rtp/transport_feedback_demuxer.cc | 58 +- .../rtp/transport_feedback_demuxer.h | 25 +- .../jni/voip/webrtc/modules/include/module.h | 2 +- .../modules/include/module_common_types.h | 2 +- .../webrtc/modules/include/module_fec_types.h | 4 +- .../webrtc/modules/pacing/bitrate_prober.cc | 2 +- .../webrtc/modules/pacing/bitrate_prober.h | 8 +- .../webrtc/modules/pacing/paced_sender.cc | 12 +- .../voip/webrtc/modules/pacing/paced_sender.h | 27 +- .../modules/pacing/pacing_controller.cc | 29 +- .../webrtc/modules/pacing/pacing_controller.h | 23 +- .../webrtc/modules/pacing/packet_router.cc | 5 + .../webrtc/modules/pacing/packet_router.h | 9 +- .../pacing/round_robin_packet_queue.cc | 12 +- .../modules/pacing/round_robin_packet_queue.h | 4 +- .../webrtc/modules/pacing/rtp_packet_pacer.h | 2 +- .../modules/pacing/task_queue_paced_sender.cc | 118 +- .../modules/pacing/task_queue_paced_sender.h | 40 +- .../aimd_rate_control.cc | 9 +- .../include/remote_bitrate_estimator.h | 21 +- .../remote_bitrate_estimator/inter_arrival.cc | 16 +- 
.../remote_bitrate_estimator/inter_arrival.h | 16 +- .../overuse_detector.h | 10 +- .../overuse_estimator.cc | 9 +- .../overuse_estimator.h | 8 +- .../remote_bitrate_estimator_abs_send_time.cc | 323 +- .../remote_bitrate_estimator_abs_send_time.h | 100 +- .../remote_bitrate_estimator_single_stream.cc | 7 +- .../remote_bitrate_estimator_single_stream.h | 2 +- .../remote_estimator_proxy.cc | 2 +- .../remote_estimator_proxy.h | 2 +- .../test/bwe_test_logging.h | 53 +- .../remote_bitrate_estimator/tools/bwe_rtp.cc | 48 +- .../remote_bitrate_estimator/tools/bwe_rtp.h | 21 +- .../tools/rtp_to_text.cc | 30 +- .../rtp_rtcp/include/receive_statistics.h | 2 +- .../include/remote_ntp_time_estimator.h | 13 +- .../webrtc/modules/rtp_rtcp/include/rtp_cvo.h | 4 +- .../include/rtp_header_extension_map.h | 8 +- .../rtp_rtcp/include/rtp_rtcp_defines.cc | 3 + .../rtp_rtcp/include/rtp_rtcp_defines.h | 34 +- .../rtp_rtcp/include/ulpfec_receiver.h | 5 +- .../modules/rtp_rtcp/mocks/mock_rtp_rtcp.h | 12 +- .../absolute_capture_time_interpolator.h | 4 +- .../source/absolute_capture_time_receiver.cc | 41 - .../source/absolute_capture_time_receiver.h | 39 - .../source/absolute_capture_time_sender.cc | 8 +- .../source/absolute_capture_time_sender.h | 4 +- .../deprecated_rtp_sender_egress.cc | 26 +- .../deprecated/deprecated_rtp_sender_egress.h | 9 +- .../source/fec_private_tables_bursty.h | 2 +- .../rtp_rtcp/source/fec_test_helper.cc | 4 +- .../modules/rtp_rtcp/source/fec_test_helper.h | 6 +- .../source/flexfec_header_reader_writer.cc | 9 +- .../rtp_rtcp/source/flexfec_receiver.cc | 2 +- .../modules/rtp_rtcp/source/flexfec_sender.cc | 10 +- .../source/forward_error_correction.cc | 26 +- .../source/forward_error_correction.h | 50 +- .../forward_error_correction_internal.cc | 8 +- .../forward_error_correction_internal.h | 18 +- .../rtp_rtcp/source/packet_sequencer.cc | 92 +- .../rtp_rtcp/source/packet_sequencer.h | 20 +- .../source/receive_statistics_impl.cc | 17 +- .../rtp_rtcp/source/receive_statistics_impl.h | 5 +- .../source/rtcp_packet/compound_packet.h | 7 +- .../rtp_rtcp/source/rtcp_packet/dlrr.h | 10 + .../source/rtcp_packet/loss_notification.cc | 2 +- .../source/rtcp_packet/loss_notification.h | 4 +- .../rtp_rtcp/source/rtcp_packet/rrtr.h | 8 + .../source/rtcp_packet/transport_feedback.cc | 25 +- .../source/rtcp_packet/transport_feedback.h | 12 +- .../modules/rtp_rtcp/source/rtcp_receiver.cc | 271 +- .../modules/rtp_rtcp/source/rtcp_receiver.h | 158 +- .../modules/rtp_rtcp/source/rtcp_sender.cc | 146 +- .../modules/rtp_rtcp/source/rtcp_sender.h | 78 +- .../rtp_rtcp/source/rtcp_transceiver.cc | 10 +- .../rtp_rtcp/source/rtcp_transceiver.h | 8 +- .../source/rtcp_transceiver_config.cc | 10 +- .../rtp_rtcp/source/rtcp_transceiver_config.h | 83 +- .../rtp_rtcp/source/rtcp_transceiver_impl.cc | 421 +- .../rtp_rtcp/source/rtcp_transceiver_impl.h | 68 +- .../rtp_dependency_descriptor_extension.cc | 1 - .../rtp_dependency_descriptor_extension.h | 9 +- .../rtp_dependency_descriptor_reader.cc | 93 +- .../source/rtp_dependency_descriptor_reader.h | 12 +- .../rtp_dependency_descriptor_writer.cc | 11 +- .../source/rtp_dependency_descriptor_writer.h | 8 +- .../modules/rtp_rtcp/source/rtp_format.h | 4 +- .../modules/rtp_rtcp/source/rtp_format_h264.h | 8 +- .../source/rtp_format_video_generic.cc | 1 - .../source/rtp_format_video_generic.h | 12 +- .../modules/rtp_rtcp/source/rtp_format_vp8.cc | 31 +- .../modules/rtp_rtcp/source/rtp_format_vp8.h | 8 +- .../source/rtp_format_vp8_test_helper.cc | 26 +- 
.../source/rtp_format_vp8_test_helper.h | 7 +- .../modules/rtp_rtcp/source/rtp_format_vp9.h | 12 +- .../rtp_generic_frame_descriptor_extension.cc | 3 +- .../rtp_generic_frame_descriptor_extension.h | 8 +- .../source/rtp_header_extension_map.cc | 34 +- .../source/rtp_header_extension_size.cc | 2 +- .../source/rtp_header_extension_size.h | 4 +- .../rtp_rtcp/source/rtp_header_extensions.cc | 86 +- .../rtp_rtcp/source/rtp_header_extensions.h | 91 +- .../modules/rtp_rtcp/source/rtp_packet.cc | 31 +- .../modules/rtp_rtcp/source/rtp_packet.h | 23 +- .../rtp_rtcp/source/rtp_packet_history.cc | 8 +- .../rtp_rtcp/source/rtp_packet_history.h | 10 +- .../rtp_rtcp/source/rtp_packet_received.h | 10 - .../rtp_rtcp/source/rtp_packet_to_send.h | 60 +- .../rtp_rtcp/source/rtp_packetizer_av1.cc | 10 +- .../modules/rtp_rtcp/source/rtp_rtcp_config.h | 10 +- .../modules/rtp_rtcp/source/rtp_rtcp_impl.cc | 83 +- .../modules/rtp_rtcp/source/rtp_rtcp_impl.h | 15 +- .../modules/rtp_rtcp/source/rtp_rtcp_impl2.cc | 275 +- .../modules/rtp_rtcp/source/rtp_rtcp_impl2.h | 61 +- .../rtp_rtcp/source/rtp_rtcp_interface.h | 48 +- .../modules/rtp_rtcp/source/rtp_sender.cc | 110 +- .../modules/rtp_rtcp/source/rtp_sender.h | 26 +- .../rtp_rtcp/source/rtp_sender_audio.cc | 51 +- .../rtp_rtcp/source/rtp_sender_audio.h | 2 + .../rtp_rtcp/source/rtp_sender_egress.cc | 76 +- .../rtp_rtcp/source/rtp_sender_egress.h | 12 +- .../rtp_rtcp/source/rtp_sender_video.cc | 112 +- .../rtp_rtcp/source/rtp_sender_video.h | 29 +- ...sender_video_frame_transformer_delegate.cc | 15 +- ..._sender_video_frame_transformer_delegate.h | 14 +- .../source/rtp_sequence_number_map.cc | 6 +- .../rtp_rtcp/source/rtp_sequence_number_map.h | 2 +- .../modules/rtp_rtcp/source/rtp_util.cc | 63 + .../webrtc/modules/rtp_rtcp/source/rtp_util.h | 31 + .../modules/rtp_rtcp/source/rtp_utility.cc | 555 --- .../modules/rtp_rtcp/source/rtp_utility.h | 55 - .../rtp_rtcp/source/rtp_video_header.h | 6 + .../rtp_video_layers_allocation_extension.cc | 15 +- .../rtp_video_layers_allocation_extension.h | 8 +- .../modules/rtp_rtcp/source/source_tracker.cc | 2 +- .../modules/rtp_rtcp/source/source_tracker.h | 12 +- .../modules/rtp_rtcp/source/time_util.cc | 42 - .../modules/rtp_rtcp/source/time_util.h | 14 - .../rtp_rtcp/source/ulpfec_generator.cc | 23 +- .../rtp_rtcp/source/ulpfec_generator.h | 6 +- .../source/ulpfec_header_reader_writer.cc | 2 +- .../rtp_rtcp/source/ulpfec_receiver_impl.cc | 10 +- .../rtp_rtcp/source/ulpfec_receiver_impl.h | 4 +- .../source/video_rtp_depacketizer_av1.cc | 31 +- .../source/video_rtp_depacketizer_h264.cc | 3 +- .../source/video_rtp_depacketizer_vp8.cc | 31 +- .../source/video_rtp_depacketizer_vp8.h | 2 +- .../source/video_rtp_depacketizer_vp9.cc | 191 +- .../source/video_rtp_depacketizer_vp9.h | 2 +- .../modules/utility/include/helpers_android.h | 4 +- .../modules/utility/include/jvm_android.h | 10 +- .../modules/utility/source/helpers_android.cc | 3 +- .../modules/utility/source/jvm_android.cc | 42 +- .../utility/source/process_thread_impl.cc | 1 + .../utility/source/process_thread_impl.h | 5 +- .../modules/video_capture/device_info_impl.cc | 3 +- .../modules/video_capture/device_info_impl.h | 2 +- .../video_capture/linux/device_info_linux.cc | 6 +- .../modules/video_capture/video_capture.h | 2 +- .../video_capture/video_capture_impl.h | 2 +- .../modules/video_coding/codec_timer.cc | 4 +- .../webrtc/modules/video_coding/codec_timer.h | 2 +- .../video_coding/codecs/av1/av1_svc_config.cc | 14 +- .../video_coding/codecs/av1/dav1d_decoder.cc | 197 + 
..._av1_encoder_absent.cc => dav1d_decoder.h} | 15 +- .../codecs/av1/libaom_av1_decoder.cc | 21 +- .../codecs/av1/libaom_av1_encoder.cc | 112 +- .../codecs/av1/libaom_av1_encoder.h | 6 +- .../av1/libaom_av1_encoder_supported.cc | 41 + .../codecs/av1/libaom_av1_encoder_supported.h | 29 + .../modules/video_coding/codecs/h264/h264.cc | 53 +- .../codecs/h264/h264_decoder_impl.cc | 279 +- .../codecs/h264/h264_decoder_impl.h | 15 +- .../codecs/h264/h264_encoder_impl.cc | 71 +- .../codecs/h264/h264_encoder_impl.h | 4 +- .../video_coding/codecs/h264/include/h264.h | 13 +- .../codecs/h264/include/h264_globals.h | 2 +- .../codecs/interface/libvpx_interface.cc | 26 +- .../include/multiplex_decoder_adapter.h | 5 +- .../include/multiplex_encoder_adapter.h | 2 +- .../multiplex/multiplex_decoder_adapter.cc | 22 +- .../multiplex_encoded_image_packer.h | 6 +- .../multiplex/multiplex_encoder_adapter.cc | 10 +- .../codecs/vp8/default_temporal_layers.cc | 25 +- .../video_coding/codecs/vp8/include/vp8.h | 2 + .../codecs/vp8/libvpx_vp8_decoder.cc | 17 +- .../codecs/vp8/libvpx_vp8_decoder.h | 5 +- .../codecs/vp8/libvpx_vp8_encoder.cc | 97 +- .../codecs/vp8/libvpx_vp8_encoder.h | 10 +- .../codecs/vp8/screenshare_layers.cc | 11 +- .../video_coding/codecs/vp9/include/vp9.h | 4 +- .../codecs/vp9/include/vp9_globals.h | 29 +- .../codecs/vp9/libvpx_vp9_decoder.cc | 75 +- .../codecs/vp9/libvpx_vp9_decoder.h | 5 +- .../codecs/vp9/libvpx_vp9_encoder.cc | 218 +- .../codecs/vp9/libvpx_vp9_encoder.h | 39 +- .../video_coding/codecs/vp9/svc_config.cc | 13 +- .../codecs/vp9/svc_rate_allocator.h | 17 - .../modules/video_coding/codecs/vp9/vp9.cc | 22 +- .../codecs/vp9/vp9_frame_buffer_pool.cc | 16 +- .../codecs/vp9/vp9_frame_buffer_pool.h | 14 +- .../modules/video_coding/decoder_database.cc | 169 +- .../modules/video_coding/decoder_database.h | 66 +- .../modules/video_coding/decoding_state.cc | 11 +- .../video_coding/deprecated/nack_module.cc | 2 +- .../video_coding/deprecated/nack_module.h | 4 +- .../modules/video_coding/event_wrapper.h | 2 +- .../video_coding/fec_controller_default.cc | 11 +- .../video_coding/fec_controller_default.h | 12 +- .../modules/video_coding/frame_buffer.cc | 9 +- .../modules/video_coding/frame_buffer2.cc | 162 +- .../modules/video_coding/frame_buffer2.h | 47 +- .../modules/video_coding/frame_buffer3.cc | 281 ++ .../modules/video_coding/frame_buffer3.h | 97 + .../modules/video_coding/frame_helpers.cc | 90 + .../modules/video_coding/frame_helpers.h | 31 + .../modules/video_coding/generic_decoder.cc | 25 +- .../modules/video_coding/generic_decoder.h | 23 +- .../video_coding/h264_packet_buffer.cc | 287 ++ .../modules/video_coding/h264_packet_buffer.h | 56 + .../video_coding/h264_sprop_parameter_sets.h | 7 +- .../video_coding/h264_sps_pps_tracker.cc | 2 +- .../video_coding/h264_sps_pps_tracker.h | 2 +- .../include/video_codec_interface.h | 4 +- .../video_coding/include/video_coding.h | 17 +- .../include/video_coding_defines.h | 4 +- .../modules/video_coding/jitter_buffer.cc | 23 +- .../modules/video_coding/jitter_buffer.h | 35 +- .../modules/video_coding/jitter_estimator.cc | 17 +- .../loss_notification_controller.cc | 6 +- .../loss_notification_controller.h | 10 +- .../modules/video_coding/media_opt_util.cc | 15 +- .../modules/video_coding/media_opt_util.h | 6 +- .../{nack_module2.cc => nack_requester.cc} | 141 +- .../{nack_module2.h => nack_requester.h} | 68 +- .../modules/video_coding/packet_buffer.cc | 10 +- .../modules/video_coding/packet_buffer.h | 4 +- .../webrtc/modules/video_coding/receiver.cc | 4 
+- .../rtp_frame_reference_finder.cc | 27 +- .../video_coding/rtp_frame_reference_finder.h | 37 +- .../rtp_seq_num_only_ref_finder.cc | 2 +- .../video_coding/rtp_vp8_ref_finder.cc | 4 +- .../video_coding/rtp_vp9_ref_finder.cc | 4 +- .../modules/video_coding/rtp_vp9_ref_finder.h | 2 +- .../modules/video_coding/session_info.cc | 9 +- .../modules/video_coding/session_info.h | 10 +- .../svc/create_scalability_structure.cc | 110 +- .../svc/create_scalability_structure.h | 6 + .../svc/scalability_structure_full_svc.cc | 9 +- .../svc/scalability_structure_key_svc.cc | 5 +- .../scalability_structure_l2t2_key_shift.cc | 1 + .../svc/scalability_structure_simulcast.cc | 5 +- .../svc/scalable_video_controller.h | 2 + .../scalable_video_controller_no_layering.cc | 1 + .../video_coding/svc/svc_rate_allocator.cc | 6 +- .../modules/video_coding/timestamp_map.cc | 2 +- .../webrtc/modules/video_coding/timing.cc | 52 +- .../voip/webrtc/modules/video_coding/timing.h | 32 +- .../video_coding/unique_timestamp_counter.h | 2 +- .../utility/bandwidth_quality_scaler.cc | 150 + .../utility/bandwidth_quality_scaler.h | 95 + .../utility/decoded_frames_history.cc | 7 +- .../utility/decoded_frames_history.h | 6 +- .../video_coding/utility/frame_dropper.cc | 2 +- .../video_coding/utility/frame_dropper.h | 8 +- ....cc => framerate_controller_deprecated.cc} | 18 +- ...er.h => framerate_controller_deprecated.h} | 11 +- .../video_coding/utility/ivf_defines.h | 23 + .../video_coding/utility/ivf_file_reader.cc | 14 +- .../video_coding/utility/ivf_file_reader.h | 6 +- .../video_coding/utility/ivf_file_writer.cc | 34 +- .../video_coding/utility/ivf_file_writer.h | 10 +- .../video_coding/utility/quality_scaler.cc | 4 +- .../video_coding/utility/quality_scaler.h | 2 +- .../utility/simulcast_rate_allocator.h | 6 +- .../utility/simulcast_test_fixture_impl.cc | 18 +- .../utility/simulcast_test_fixture_impl.h | 6 +- .../video_coding/utility/vp9_constants.h | 198 + .../utility/vp9_uncompressed_header_parser.cc | 771 ++-- .../utility/vp9_uncompressed_header_parser.h | 95 +- .../video_coding/video_codec_initializer.cc | 10 +- .../modules/video_coding/video_coding_impl.cc | 9 +- .../modules/video_coding/video_coding_impl.h | 7 +- .../modules/video_coding/video_receiver.cc | 17 +- .../modules/video_coding/video_receiver2.cc | 19 +- .../modules/video_coding/video_receiver2.h | 10 +- .../util/denoiser_filter_c.cc | 2 +- .../video_processing/util/skin_detection.h | 10 +- .../webrtc/net/dcsctp/common/internal_types.h | 26 +- .../voip/webrtc/net/dcsctp/common/pair_hash.h | 31 - .../net/dcsctp/common/sequence_numbers.h | 2 +- .../net/dcsctp/fuzzers/dcsctp_fuzzers.h | 6 +- .../net/dcsctp/packet/bounded_byte_reader.h | 4 +- .../net/dcsctp/packet/bounded_byte_writer.h | 4 +- .../net/dcsctp/packet/chunk/data_chunk.cc | 2 +- .../net/dcsctp/packet/chunk/data_common.h | 2 +- .../jni/voip/webrtc/net/dcsctp/packet/data.h | 4 +- .../missing_mandatory_parameter_cause.cc | 4 +- .../webrtc/net/dcsctp/packet/sctp_packet.cc | 4 +- .../webrtc/net/dcsctp/packet/sctp_packet.h | 1 - .../dcsctp/public/dcsctp_handover_state.cc | 68 + .../net/dcsctp/public/dcsctp_handover_state.h | 132 + .../webrtc/net/dcsctp/public/dcsctp_options.h | 71 +- .../webrtc/net/dcsctp/public/dcsctp_socket.h | 178 +- .../dcsctp/public/dcsctp_socket_factory.cc | 31 + .../net/dcsctp/public/dcsctp_socket_factory.h | 31 + .../net/dcsctp/public/mock_dcsctp_socket.h | 82 + .../public/text_pcap_packet_observer.cc | 54 + .../dcsctp/public/text_pcap_packet_observer.h | 46 + 
.../jni/voip/webrtc/net/dcsctp/public/types.h | 30 +- .../voip/webrtc/net/dcsctp/rx/data_tracker.cc | 206 +- .../voip/webrtc/net/dcsctp/rx/data_tracker.h | 74 +- .../webrtc/net/dcsctp/rx/reassembly_queue.cc | 63 +- .../webrtc/net/dcsctp/rx/reassembly_queue.h | 15 +- .../webrtc/net/dcsctp/rx/reassembly_streams.h | 4 + .../rx/traditional_reassembly_streams.cc | 80 +- .../rx/traditional_reassembly_streams.h | 28 +- .../net/dcsctp/socket/callback_deferrer.cc | 161 + .../net/dcsctp/socket/callback_deferrer.h | 153 +- .../webrtc/net/dcsctp/socket/dcsctp_socket.cc | 334 +- .../webrtc/net/dcsctp/socket/dcsctp_socket.h | 34 +- .../net/dcsctp/socket/heartbeat_handler.cc | 19 +- .../webrtc/net/dcsctp/socket/mock_context.h | 2 +- .../socket/mock_dcsctp_socket_callbacks.h | 22 +- .../webrtc/net/dcsctp/socket/packet_sender.cc | 48 + .../webrtc/net/dcsctp/socket/packet_sender.h | 40 + .../net/dcsctp/socket/stream_reset_handler.cc | 17 +- .../net/dcsctp/socket/stream_reset_handler.h | 24 +- .../socket/transmission_control_block.cc | 97 +- .../socket/transmission_control_block.h | 124 +- .../net/dcsctp/testing/data_generator.h | 4 +- .../webrtc/net/dcsctp/timer/fake_timeout.h | 18 +- .../net/dcsctp/timer/task_queue_timeout.cc | 7 +- .../net/dcsctp/timer/task_queue_timeout.h | 10 +- .../jni/voip/webrtc/net/dcsctp/timer/timer.cc | 26 +- .../jni/voip/webrtc/net/dcsctp/timer/timer.h | 51 +- .../webrtc/net/dcsctp/tx/fcfs_send_queue.cc | 250 -- .../webrtc/net/dcsctp/tx/fcfs_send_queue.h | 123 - .../webrtc/net/dcsctp/tx/mock_send_queue.h | 12 +- .../webrtc/net/dcsctp/tx/outstanding_data.cc | 479 ++ .../webrtc/net/dcsctp/tx/outstanding_data.h | 285 ++ .../dcsctp/tx/retransmission_error_counter.cc | 4 +- .../dcsctp/tx/retransmission_error_counter.h | 4 +- .../net/dcsctp/tx/retransmission_queue.cc | 499 +-- .../net/dcsctp/tx/retransmission_queue.h | 215 +- .../net/dcsctp/tx/retransmission_timeout.cc | 48 +- .../net/dcsctp/tx/retransmission_timeout.h | 23 +- .../webrtc/net/dcsctp/tx/rr_send_queue.cc | 491 ++ .../voip/webrtc/net/dcsctp/tx/rr_send_queue.h | 244 + .../voip/webrtc/net/dcsctp/tx/send_queue.h | 23 +- TMessagesProj/jni/voip/webrtc/p2p/OWNERS | 1 - .../webrtc/p2p/base/async_stun_tcp_socket.cc | 19 +- .../webrtc/p2p/base/async_stun_tcp_socket.h | 21 +- .../p2p/base/basic_async_resolver_factory.cc | 95 +- .../webrtc/p2p/base/basic_ice_controller.cc | 47 +- .../webrtc/p2p/base/basic_ice_controller.h | 26 +- .../p2p/base/basic_packet_socket_factory.cc | 72 +- .../p2p/base/basic_packet_socket_factory.h | 25 +- .../jni/voip/webrtc/p2p/base/connection.cc | 292 +- .../jni/voip/webrtc/p2p/base/connection.h | 248 +- .../voip/webrtc/p2p/base/connection_info.cc | 1 + .../voip/webrtc/p2p/base/connection_info.h | 7 +- .../voip/webrtc/p2p/base/dtls_transport.cc | 108 +- .../jni/voip/webrtc/p2p/base/dtls_transport.h | 24 +- .../p2p/base/dtls_transport_internal.cc | 18 - .../webrtc/p2p/base/dtls_transport_internal.h | 48 +- .../webrtc/p2p/base/fake_dtls_transport.h | 17 +- .../voip/webrtc/p2p/base/fake_ice_transport.h | 8 +- .../webrtc/p2p/base/fake_packet_transport.h | 4 +- .../webrtc/p2p/base/fake_port_allocator.h | 21 +- .../p2p/base/ice_controller_interface.h | 6 +- .../webrtc/p2p/base/ice_transport_internal.h | 14 +- .../voip/webrtc/p2p/base/mock_ice_transport.h | 2 +- .../webrtc/p2p/base/p2p_transport_channel.cc | 153 +- .../webrtc/p2p/base/p2p_transport_channel.h | 32 +- .../p2p_transport_channel_ice_field_trials.h | 5 +- .../p2p/base/packet_transport_internal.h | 2 +- .../jni/voip/webrtc/p2p/base/port.cc | 31 +- 
TMessagesProj/jni/voip/webrtc/p2p/base/port.h | 30 +- .../voip/webrtc/p2p/base/port_allocator.cc | 9 +- .../jni/voip/webrtc/p2p/base/port_allocator.h | 27 +- .../jni/voip/webrtc/p2p/base/pseudo_tcp.cc | 10 +- .../jni/voip/webrtc/p2p/base/pseudo_tcp.h | 14 +- .../jni/voip/webrtc/p2p/base/stun_port.cc | 95 +- .../jni/voip/webrtc/p2p/base/stun_port.h | 55 +- .../jni/voip/webrtc/p2p/base/stun_request.cc | 13 +- .../jni/voip/webrtc/p2p/base/stun_request.h | 17 +- .../jni/voip/webrtc/p2p/base/stun_server.h | 2 +- .../jni/voip/webrtc/p2p/base/tcp_port.cc | 116 +- .../jni/voip/webrtc/p2p/base/tcp_port.h | 27 +- .../voip/webrtc/p2p/base/test_stun_server.cc | 4 +- .../voip/webrtc/p2p/base/test_turn_server.h | 38 +- .../p2p/base/transport_description_factory.cc | 25 +- .../p2p/base/transport_description_factory.h | 4 +- .../jni/voip/webrtc/p2p/base/turn_port.cc | 112 +- .../jni/voip/webrtc/p2p/base/turn_port.h | 142 +- .../jni/voip/webrtc/p2p/base/turn_server.cc | 34 +- .../jni/voip/webrtc/p2p/base/turn_server.h | 19 +- .../webrtc/p2p/client/basic_port_allocator.cc | 231 +- .../webrtc/p2p/client/basic_port_allocator.h | 86 +- .../p2p/client/relay_port_factory_interface.h | 1 - .../webrtc/p2p/client/turn_port_factory.cc | 5 +- .../voip/webrtc/p2p/stunprober/stun_prober.cc | 8 +- .../voip/webrtc/p2p/stunprober/stun_prober.h | 30 +- TMessagesProj/jni/voip/webrtc/pc/OWNERS | 1 - .../jni/voip/webrtc/pc/audio_rtp_receiver.cc | 170 +- .../jni/voip/webrtc/pc/audio_rtp_receiver.h | 31 +- .../jni/voip/webrtc/pc/audio_track.cc | 10 +- .../jni/voip/webrtc/pc/audio_track.h | 9 +- TMessagesProj/jni/voip/webrtc/pc/channel.cc | 735 ++- TMessagesProj/jni/voip/webrtc/pc/channel.h | 203 +- .../jni/voip/webrtc/pc/channel_interface.h | 56 +- .../jni/voip/webrtc/pc/channel_manager.cc | 112 +- .../jni/voip/webrtc/pc/channel_manager.h | 46 +- .../jni/voip/webrtc/pc/connection_context.cc | 88 +- .../jni/voip/webrtc/pc/connection_context.h | 2 + .../voip/webrtc/pc/data_channel_controller.cc | 25 +- .../voip/webrtc/pc/data_channel_controller.h | 13 +- .../jni/voip/webrtc/pc/dtls_srtp_transport.cc | 25 +- .../jni/voip/webrtc/pc/dtls_srtp_transport.h | 8 +- .../jni/voip/webrtc/pc/dtls_transport.cc | 55 +- .../jni/voip/webrtc/pc/dtls_transport.h | 2 +- .../jni/voip/webrtc/pc/dtmf_sender.cc | 10 +- .../jni/voip/webrtc/pc/dtmf_sender.h | 18 +- .../jni/voip/webrtc/pc/external_hmac.cc | 6 +- .../jni/voip/webrtc/pc/external_hmac.h | 1 + .../jni/voip/webrtc/pc/ice_server_parsing.cc | 32 +- .../jni/voip/webrtc/pc/ice_server_parsing.h | 6 +- .../jni/voip/webrtc/pc/jitter_buffer_delay.h | 1 + .../jni/voip/webrtc/pc/jsep_ice_candidate.cc | 2 - .../webrtc/pc/jsep_session_description.cc | 21 +- .../jni/voip/webrtc/pc/jsep_transport.cc | 94 +- .../jni/voip/webrtc/pc/jsep_transport.h | 29 +- .../webrtc/pc/jsep_transport_collection.cc | 374 ++ .../webrtc/pc/jsep_transport_collection.h | 175 + .../webrtc/pc/jsep_transport_controller.cc | 376 +- .../webrtc/pc/jsep_transport_controller.h | 89 +- .../voip/webrtc/pc/media_protocol_names.cc | 59 +- .../jni/voip/webrtc/pc/media_protocol_names.h | 30 +- .../jni/voip/webrtc/pc/media_session.cc | 713 +-- .../jni/voip/webrtc/pc/media_session.h | 7 +- .../jni/voip/webrtc/pc/media_stream.cc | 17 +- .../jni/voip/webrtc/pc/media_stream.h | 2 +- .../voip/webrtc/pc/media_stream_observer.cc | 27 +- .../voip/webrtc/pc/media_stream_observer.h | 31 +- .../webrtc/{api => pc}/media_stream_proxy.h | 14 +- .../{api => pc}/media_stream_track_proxy.h | 24 +- .../jni/voip/webrtc/pc/peer_connection.cc | 237 +- 
.../jni/voip/webrtc/pc/peer_connection.h | 134 +- ...r_connection_adaptation_integrationtest.cc | 13 +- .../voip/webrtc/pc/peer_connection_factory.cc | 33 +- .../voip/webrtc/pc/peer_connection_factory.h | 6 + .../peer_connection_factory_proxy.h | 14 +- .../voip/webrtc/pc/peer_connection_internal.h | 120 +- .../pc/peer_connection_message_handler.cc | 3 +- .../{api => pc}/peer_connection_proxy.h | 23 +- .../voip/webrtc/pc/peer_connection_wrapper.cc | 14 +- TMessagesProj/jni/voip/webrtc/pc/proxy.cc | 25 + .../jni/voip/webrtc/{api => pc}/proxy.h | 368 +- .../jni/voip/webrtc/pc/remote_audio_source.cc | 8 +- .../jni/voip/webrtc/pc/rtc_stats_collector.cc | 238 +- .../jni/voip/webrtc/pc/rtc_stats_collector.h | 56 +- .../webrtc/pc/rtc_stats_integrationtest.cc | 65 +- .../jni/voip/webrtc/pc/rtc_stats_traversal.cc | 6 +- .../jni/voip/webrtc/pc/rtc_stats_traversal.h | 6 +- .../jni/voip/webrtc/pc/rtcp_mux_filter.cc | 3 +- .../jni/voip/webrtc/pc/rtp_media_utils.cc | 4 +- .../jni/voip/webrtc/pc/rtp_media_utils.h | 6 +- .../webrtc/pc/rtp_parameters_conversion.cc | 2 +- .../webrtc/pc/rtp_parameters_conversion.h | 3 +- .../jni/voip/webrtc/pc/rtp_receiver.cc | 4 +- .../jni/voip/webrtc/pc/rtp_receiver.h | 21 +- .../jni/voip/webrtc/pc/rtp_receiver_proxy.h | 54 + .../jni/voip/webrtc/pc/rtp_sender.cc | 34 +- TMessagesProj/jni/voip/webrtc/pc/rtp_sender.h | 68 +- .../jni/voip/webrtc/pc/rtp_sender_proxy.h | 51 + .../jni/voip/webrtc/pc/rtp_transceiver.cc | 77 +- .../jni/voip/webrtc/pc/rtp_transceiver.h | 47 +- .../webrtc/pc/rtp_transmission_manager.cc | 37 +- .../voip/webrtc/pc/rtp_transmission_manager.h | 12 +- .../jni/voip/webrtc/pc/rtp_transport.cc | 6 +- .../jni/voip/webrtc/pc/rtp_transport.h | 1 + .../voip/webrtc/pc/rtp_transport_internal.h | 2 +- .../jni/voip/webrtc/pc/sctp_data_channel.cc | 38 +- .../jni/voip/webrtc/pc/sctp_data_channel.h | 15 +- .../webrtc/pc/sctp_data_channel_transport.cc | 7 +- .../webrtc/pc/sctp_data_channel_transport.h | 2 +- .../jni/voip/webrtc/pc/sctp_transport.cc | 11 +- .../jni/voip/webrtc/pc/sctp_transport.h | 6 +- .../jni/voip/webrtc/pc/sctp_utils.cc | 4 +- .../jni/voip/webrtc/pc/sdp_offer_answer.cc | 1786 ++++---- .../jni/voip/webrtc/pc/sdp_offer_answer.h | 160 +- .../jni/voip/webrtc/pc/sdp_serializer.cc | 24 +- .../jni/voip/webrtc/pc/sdp_serializer.h | 2 +- TMessagesProj/jni/voip/webrtc/pc/sdp_utils.cc | 2 +- .../jni/voip/webrtc/pc/session_description.cc | 15 +- .../jni/voip/webrtc/pc/session_description.h | 26 +- .../voip/webrtc/pc/simulcast_description.cc | 2 +- .../voip/webrtc/pc/simulcast_description.h | 4 +- .../jni/voip/webrtc/pc/srtp_filter.cc | 8 +- .../jni/voip/webrtc/pc/srtp_filter.h | 1 - .../jni/voip/webrtc/pc/srtp_session.cc | 15 +- .../jni/voip/webrtc/pc/srtp_session.h | 10 +- .../jni/voip/webrtc/pc/srtp_transport.cc | 23 +- .../jni/voip/webrtc/pc/stats_collector.cc | 53 +- .../jni/voip/webrtc/pc/stats_collector.h | 17 +- .../jni/voip/webrtc/pc/stream_collection.h | 5 +- .../voip/webrtc/pc/track_media_info_map.cc | 4 +- .../jni/voip/webrtc/pc/transceiver_list.cc | 2 + .../jni/voip/webrtc/pc/transport_stats.h | 7 +- .../jni/voip/webrtc/pc/usage_pattern.h | 6 +- TMessagesProj/jni/voip/webrtc/pc/used_ids.h | 11 +- .../jni/voip/webrtc/pc/video_rtp_receiver.cc | 164 +- .../jni/voip/webrtc/pc/video_rtp_receiver.h | 32 +- .../voip/webrtc/pc/video_rtp_track_source.h | 8 +- .../jni/voip/webrtc/pc/video_track.cc | 95 +- .../jni/voip/webrtc/pc/video_track.h | 42 +- .../jni/voip/webrtc/pc/video_track_source.cc | 1 + .../jni/voip/webrtc/pc/video_track_source.h | 13 +- 
.../webrtc/pc/video_track_source_proxy.cc | 27 + .../{api => pc}/video_track_source_proxy.h | 25 +- .../jni/voip/webrtc/pc/webrtc_sdp.cc | 182 +- TMessagesProj/jni/voip/webrtc/pc/webrtc_sdp.h | 14 +- .../pc/webrtc_session_description_factory.cc | 29 +- .../pc/webrtc_session_description_factory.h | 13 +- TMessagesProj/jni/voip/webrtc/rtc_base/OWNERS | 1 - .../jni/voip/webrtc/rtc_base/async_invoker.cc | 28 +- .../jni/voip/webrtc/rtc_base/async_invoker.h | 29 +- .../voip/webrtc/rtc_base/async_invoker_inl.h | 2 +- .../webrtc/rtc_base/async_packet_socket.h | 27 +- .../voip/webrtc/rtc_base/async_resolver.cc | 40 +- .../rtc_base/async_resolver_interface.h | 10 +- .../jni/voip/webrtc/rtc_base/async_socket.cc | 43 +- .../jni/voip/webrtc/rtc_base/async_socket.h | 51 +- .../voip/webrtc/rtc_base/async_tcp_socket.cc | 191 +- .../voip/webrtc/rtc_base/async_tcp_socket.h | 67 +- .../voip/webrtc/rtc_base/async_udp_socket.cc | 13 +- .../voip/webrtc/rtc_base/async_udp_socket.h | 15 +- .../jni/voip/webrtc/rtc_base/bit_buffer.cc | 235 +- .../jni/voip/webrtc/rtc_base/bit_buffer.h | 52 +- .../voip/webrtc/rtc_base/bitstream_reader.cc | 135 + .../voip/webrtc/rtc_base/bitstream_reader.h | 145 + .../webrtc/rtc_base/boringssl_certificate.cc | 4 +- .../webrtc/rtc_base/boringssl_certificate.h | 7 +- .../voip/webrtc/rtc_base/boringssl_identity.h | 6 +- .../jni/voip/webrtc/rtc_base/buffer.h | 16 +- .../jni/voip/webrtc/rtc_base/buffer_queue.h | 8 +- .../jni/voip/webrtc/rtc_base/byte_buffer.h | 25 +- .../jni/voip/webrtc/rtc_base/checks.h | 10 +- .../voip/webrtc/rtc_base/containers/BUILD.gn | 59 + .../webrtc/rtc_base/containers/as_const.h | 32 + .../webrtc/rtc_base/containers/flat_map.h | 374 ++ .../webrtc/rtc_base/containers/flat_set.h | 178 + .../containers/flat_tree.cc} | 14 +- .../webrtc/rtc_base/containers/flat_tree.h | 1102 +++++ .../webrtc/rtc_base/containers/identity.h | 36 + .../voip/webrtc/rtc_base/containers/invoke.h | 162 + .../rtc_base/containers/move_only_int.h | 74 + .../voip/webrtc/rtc_base/containers/not_fn.h | 64 + .../voip/webrtc/rtc_base/containers/void_t.h | 36 + .../webrtc/rtc_base/copy_on_write_buffer.h | 22 +- .../jni/voip/webrtc/rtc_base/cpu_time.cc | 16 +- .../jni/voip/webrtc/rtc_base/crc32.h | 4 +- .../deprecated/recursive_critical_section.h | 7 +- .../jni/voip/webrtc/rtc_base/event_tracer.h | 12 +- .../balanced_degradation_settings.h | 18 +- .../bandwidth_quality_scaler_settings.cc | 43 + .../bandwidth_quality_scaler_settings.h | 35 + .../experiments/cpu_speed_experiment.h | 14 +- .../experiments/encoder_info_settings.cc | 94 +- .../experiments/encoder_info_settings.h | 9 + .../rtc_base/experiments/field_trial_list.cc | 2 +- .../rtc_base/experiments/field_trial_list.h | 4 +- .../experiments/field_trial_parser.cc | 63 +- .../rtc_base/experiments/field_trial_parser.h | 7 +- .../min_video_bitrate_experiment.cc | 2 +- .../experiments/quality_rampup_experiment.cc | 7 +- .../experiments/quality_rampup_experiment.h | 5 +- .../experiments/quality_scaling_experiment.h | 8 +- .../experiments/struct_parameters_parser.h | 2 +- .../jni/voip/webrtc/rtc_base/fake_network.h | 36 +- .../voip/webrtc/rtc_base/fake_ssl_identity.cc | 6 +- .../webrtc/rtc_base/file_rotating_stream.cc | 4 +- .../webrtc/rtc_base/file_rotating_stream.h | 16 +- .../webrtc/rtc_base/firewall_socket_server.cc | 14 +- .../webrtc/rtc_base/firewall_socket_server.h | 3 - TMessagesProj/jni/voip/webrtc/rtc_base/hash.h | 32 - .../jni/voip/webrtc/rtc_base/http_common.cc | 6 +- .../voip/webrtc/rtc_base/ifaddrs_android.cc | 17 +- 
.../jni/voip/webrtc/rtc_base/location.h | 2 +- .../jni/voip/webrtc/rtc_base/log_sinks.h | 14 +- .../jni/voip/webrtc/rtc_base/logging.cc | 53 +- .../jni/voip/webrtc/rtc_base/logging.h | 27 +- .../rtc_base/mdns_responder_interface.h | 12 +- .../webrtc/rtc_base/memory/aligned_malloc.cc | 2 +- .../webrtc/rtc_base/memory/aligned_malloc.h | 8 +- .../webrtc/rtc_base/memory/fifo_buffer.cc | 71 +- .../voip/webrtc/rtc_base/memory/fifo_buffer.h | 47 +- .../jni/voip/webrtc/rtc_base/message_digest.h | 32 +- .../voip/webrtc/rtc_base/message_handler.h | 8 +- .../jni/voip/webrtc/rtc_base/nat_server.cc | 4 +- .../jni/voip/webrtc/rtc_base/nat_server.h | 5 +- .../webrtc/rtc_base/nat_socket_factory.cc | 50 +- .../voip/webrtc/rtc_base/nat_socket_factory.h | 36 +- .../jni/voip/webrtc/rtc_base/nat_types.cc | 2 +- .../jni/voip/webrtc/rtc_base/net_helpers.cc | 6 +- .../jni/voip/webrtc/rtc_base/network.cc | 253 +- .../jni/voip/webrtc/rtc_base/network.h | 68 +- .../voip/webrtc/rtc_base/network_constants.cc | 2 +- .../voip/webrtc/rtc_base/network_constants.h | 15 + .../voip/webrtc/rtc_base/network_monitor.h | 28 +- .../webrtc/rtc_base/null_socket_server.cc | 8 +- .../voip/webrtc/rtc_base/null_socket_server.h | 2 - .../event_based_exponential_moving_average.cc | 4 +- .../event_based_exponential_moving_average.h | 4 +- .../rtc_base/numerics/event_rate_counter.h | 4 +- .../webrtc/rtc_base/numerics/exp_filter.h | 4 +- .../numerics/histogram_percentile_counter.cc | 2 +- .../numerics/histogram_percentile_counter.h | 6 +- .../webrtc/rtc_base/numerics/math_utils.h | 2 +- .../webrtc/rtc_base/numerics/moving_average.h | 4 +- .../rtc_base/numerics/moving_max_counter.h | 10 +- .../rtc_base/numerics/moving_median_filter.h | 10 +- .../rtc_base/numerics/percentile_filter.h | 8 +- .../webrtc/rtc_base/numerics/sample_counter.h | 8 +- .../rtc_base/numerics/sequence_number_util.h | 30 +- .../voip/webrtc/rtc_base/openssl_adapter.cc | 169 +- .../voip/webrtc/rtc_base/openssl_adapter.h | 54 +- .../webrtc/rtc_base/openssl_certificate.cc | 65 +- .../webrtc/rtc_base/openssl_certificate.h | 5 +- .../jni/voip/webrtc/rtc_base/openssl_digest.h | 6 +- .../voip/webrtc/rtc_base/openssl_identity.h | 6 +- .../voip/webrtc/rtc_base/openssl_key_pair.cc | 8 +- .../voip/webrtc/rtc_base/openssl_key_pair.h | 6 +- .../webrtc/rtc_base/openssl_session_cache.h | 6 +- .../webrtc/rtc_base/openssl_stream_adapter.cc | 24 +- .../webrtc/rtc_base/openssl_stream_adapter.h | 6 +- .../voip/webrtc/rtc_base/operations_chain.cc | 13 +- .../voip/webrtc/rtc_base/operations_chain.h | 28 +- .../webrtc/rtc_base/physical_socket_server.cc | 132 +- .../webrtc/rtc_base/physical_socket_server.h | 7 +- .../voip/webrtc/rtc_base/platform_thread.h | 4 +- .../jni/voip/webrtc/rtc_base/proxy_server.cc | 32 +- .../jni/voip/webrtc/rtc_base/proxy_server.h | 46 +- .../jni/voip/webrtc/rtc_base/random.cc | 10 +- .../voip/webrtc/rtc_base/rate_statistics.cc | 2 +- .../jni/voip/webrtc/rtc_base/rate_tracker.h | 2 +- .../voip/webrtc/rtc_base/ref_counted_object.h | 83 +- .../webrtc/rtc_base/rolling_accumulator.h | 6 +- .../voip/webrtc/rtc_base/rtc_certificate.cc | 12 +- .../voip/webrtc/rtc_base/rtc_certificate.h | 12 +- .../rtc_base/rtc_certificate_generator.cc | 18 +- .../rtc_base/rtc_certificate_generator.h | 32 +- .../webrtc/rtc_base/server_socket_adapters.cc | 6 +- .../webrtc/rtc_base/server_socket_adapters.h | 15 +- .../jni/voip/webrtc/rtc_base/sigslot_tester.h | 31 +- .../voip/webrtc/rtc_base/sigslottester.h.pump | 12 +- .../jni/voip/webrtc/rtc_base/socket.h | 21 +- 
.../voip/webrtc/rtc_base/socket_adapters.cc | 55 +- .../voip/webrtc/rtc_base/socket_adapters.h | 35 +- .../jni/voip/webrtc/rtc_base/socket_factory.h | 7 +- .../jni/voip/webrtc/rtc_base/socket_stream.cc | 16 +- .../jni/voip/webrtc/rtc_base/socket_stream.h | 26 +- .../jni/voip/webrtc/rtc_base/ssl_adapter.cc | 6 +- .../jni/voip/webrtc/rtc_base/ssl_adapter.h | 32 +- .../voip/webrtc/rtc_base/ssl_certificate.cc | 16 +- .../voip/webrtc/rtc_base/ssl_certificate.h | 7 +- .../jni/voip/webrtc/rtc_base/ssl_identity.cc | 8 +- .../jni/voip/webrtc/rtc_base/ssl_identity.h | 10 +- .../webrtc/rtc_base/ssl_stream_adapter.cc | 61 +- .../voip/webrtc/rtc_base/ssl_stream_adapter.h | 34 +- .../jni/voip/webrtc/rtc_base/stream.h | 7 +- .../jni/voip/webrtc/rtc_base/string_encode.cc | 101 +- .../jni/voip/webrtc/rtc_base/string_encode.h | 39 +- .../jni/voip/webrtc/rtc_base/string_utils.h | 39 +- .../jni/voip/webrtc/rtc_base/strings/json.cc | 6 +- .../webrtc/rtc_base/strings/string_builder.cc | 2 +- .../webrtc/rtc_base/strings/string_builder.h | 6 +- .../dcsctp/public => rtc_base}/strong_alias.h | 21 +- .../webrtc/rtc_base/synchronization/mutex.h | 11 +- .../rtc_base/synchronization/mutex_abseil.h | 5 + .../synchronization/mutex_critical_section.h | 1 + .../rtc_base/synchronization/mutex_pthread.h | 55 +- .../synchronization/mutex_race_check.h | 65 - .../sequence_checker_internal.cc | 5 +- .../webrtc/rtc_base/system/file_wrapper.h | 6 +- .../jni/voip/webrtc/rtc_base/system/unused.h | 6 +- .../jni/voip/webrtc/rtc_base/system_time.cc | 2 +- .../jni/voip/webrtc/rtc_base/task_queue.cc | 6 + .../jni/voip/webrtc/rtc_base/task_queue.h | 25 +- .../webrtc/rtc_base/task_queue_for_test.h | 2 +- .../webrtc/rtc_base/task_queue_libevent.cc | 4 +- .../voip/webrtc/rtc_base/task_queue_stdlib.cc | 18 +- .../voip/webrtc/rtc_base/task_queue_win.cc | 18 +- .../task_utils/pending_task_safety_flag.cc | 16 +- .../task_utils/pending_task_safety_flag.h | 6 +- .../rtc_base/task_utils/repeating_task.cc | 59 +- .../rtc_base/task_utils/repeating_task.h | 96 +- .../jni/voip/webrtc/rtc_base/test_client.h | 5 +- .../voip/webrtc/rtc_base/test_echo_server.cc | 4 +- .../voip/webrtc/rtc_base/test_echo_server.h | 15 +- .../jni/voip/webrtc/rtc_base/test_utils.h | 26 +- .../rtc_base/third_party/base64/base64.cc | 15 +- .../rtc_base/third_party/base64/base64.h | 8 +- .../rtc_base/third_party/sigslot/sigslot.h | 10 +- .../jni/voip/webrtc/rtc_base/thread.cc | 55 +- .../jni/voip/webrtc/rtc_base/thread.h | 138 +- .../voip/webrtc/rtc_base/thread_annotations.h | 3 + .../jni/voip/webrtc/rtc_base/thread_message.h | 15 +- .../jni/voip/webrtc/rtc_base/time_utils.cc | 6 +- .../jni/voip/webrtc/rtc_base/time_utils.h | 6 + .../voip/webrtc/rtc_base/timestamp_aligner.cc | 6 +- .../voip/webrtc/rtc_base/timestamp_aligner.h | 19 +- .../jni/voip/webrtc/rtc_base/trace_event.h | 34 +- .../webrtc/rtc_base/unique_id_generator.h | 2 +- .../voip/webrtc/rtc_base/units/unit_base.h | 8 + .../webrtc/rtc_base/virtual_socket_server.cc | 349 +- .../webrtc/rtc_base/virtual_socket_server.h | 362 +- .../rtc_base/win/create_direct3d_device.cc | 57 + .../rtc_base/win/create_direct3d_device.h | 34 + .../rtc_base/win/scoped_com_initializer.cc | 8 +- .../webrtc/rtc_base/win/windows_version.cc | 19 +- .../webrtc/rtc_base/win/windows_version.h | 9 +- .../jni/voip/webrtc/rtc_base/win32.cc | 26 +- .../jni/voip/webrtc/rtc_base/win32.h | 62 +- .../webrtc/rtc_base/win32_socket_server.cc | 817 ---- .../webrtc/rtc_base/win32_socket_server.h | 153 - .../jni/voip/webrtc/sdk/android/OWNERS | 3 +- 
.../WebRtcAudioManager_jni.h | 3 - .../generated_base_jni/Histogram_jni.h | 3 - .../generated_base_jni/JniCommon_jni.h | 3 - .../NetworkChangeDetector_jni.h | 3 - .../generated_base_jni/NetworkMonitor_jni.h | 3 - .../generated_base_jni/RefCounted_jni.h | 3 - .../BuiltinAudioDecoderFactoryFactory_jni.h | 3 - .../BuiltinAudioEncoderFactoryFactory_jni.h | 3 - .../ArrayList_jni.h | 153 +- .../BigInteger_jni.h | 2391 +++++----- .../Boolean_jni.h | 73 +- .../Double_jni.h | 113 +- .../generated_external_classes_jni/Enum_jni.h | 49 +- .../Integer_jni.h | 198 +- .../Iterable_jni.h | 15 +- .../Iterator_jni.h | 19 +- .../LinkedHashMap_jni.h | 73 +- .../generated_external_classes_jni/Long_jni.h | 193 +- .../generated_external_classes_jni/Map_jni.h | 687 ++- .../WebRtcAudioRecord_jni.h | 9 +- .../WebRtcAudioTrack_jni.h | 3 - .../JavaAudioDeviceModule_jni.h | 3 - .../LibaomAv1Decoder_jni.h | 3 - .../LibaomAv1Encoder_jni.h | 3 - .../LibvpxVp8Decoder_jni.h | 3 - .../LibvpxVp8Encoder_jni.h | 3 - .../LibvpxVp9Decoder_jni.h | 3 - .../LibvpxVp9Encoder_jni.h | 3 - .../generated_logging_jni/JNILogging_jni.h | 3 - .../generated_metrics_jni/Metrics_jni.h | 3 - .../generated_native_api_jni/JniHelper_jni.h | 3 - .../WebRtcClassLoader_jni.h | 3 - .../AddIceObserver_jni.h | 3 - .../AudioTrack_jni.h | 3 - .../CallSessionFileRotatingLogSink_jni.h | 3 - .../CandidatePairChangeEvent_jni.h | 3 - .../CryptoOptions_jni.h | 3 - .../DataChannel_jni.h | 3 - .../DtmfSender_jni.h | 3 - .../IceCandidateErrorEvent_jni.h | 72 + .../IceCandidate_jni.h | 3 - .../MediaConstraints_jni.h | 3 - .../MediaSource_jni.h | 3 - .../MediaStreamTrack_jni.h | 3 - .../MediaStream_jni.h | 3 - .../PeerConnectionFactory_jni.h | 3 - .../PeerConnection_jni.h | 67 +- .../RTCStatsCollectorCallback_jni.h | 3 - .../RTCStatsReport_jni.h | 3 - .../RTCStats_jni.h | 3 - .../RtcCertificatePem_jni.h | 3 - .../RtpParameters_jni.h | 3 - .../RtpReceiver_jni.h | 3 - .../RtpSender_jni.h | 3 - .../RtpTransceiver_jni.h | 3 - .../SSLCertificateVerifier_jni.h | 3 - .../SdpObserver_jni.h | 3 - .../SessionDescription_jni.h | 3 - .../StatsObserver_jni.h | 3 - .../StatsReport_jni.h | 3 - .../TurnCustomizer_jni.h | 3 - .../EglBase10Impl_jni.h | 3 - .../generated_video_jni/EncodedImage_jni.h | 3 - .../generated_video_jni/H264Utils_jni.h | 3 - .../generated_video_jni/JavaI420Buffer_jni.h | 3 - .../generated_video_jni/NV12Buffer_jni.h | 3 - .../generated_video_jni/NV21Buffer_jni.h | 3 - .../NativeAndroidVideoTrackSource_jni.h | 3 - .../NativeCapturerObserver_jni.h | 3 - .../TimestampAligner_jni.h | 3 - .../generated_video_jni/VideoCodecInfo_jni.h | 3 - .../VideoCodecStatus_jni.h | 3 - .../VideoDecoderFactory_jni.h | 3 - .../VideoDecoderFallback_jni.h | 3 - .../VideoDecoderWrapper_jni.h | 3 - .../generated_video_jni/VideoDecoder_jni.h | 3 - .../VideoEncoderFactory_jni.h | 3 - .../VideoEncoderFallback_jni.h | 3 - .../VideoEncoderWrapper_jni.h | 3 - .../generated_video_jni/VideoEncoder_jni.h | 140 +- .../generated_video_jni/VideoFrame_jni.h | 24 +- .../generated_video_jni/VideoSink_jni.h | 3 - .../generated_video_jni/VideoTrack_jni.h | 3 - .../WrappedNativeI420Buffer_jni.h | 3 - .../generated_video_jni/YuvHelper_jni.h | 3 - .../audio_device_android.cc | 8 +- .../sdk/android/native_api/codecs/wrapper.h | 2 +- .../android/native_api/jni/class_loader.cc | 2 +- .../sdk/android/native_api/jni/java_types.h | 16 +- .../android/native_api/jni/scoped_java_ref.h | 28 +- .../peerconnection/peer_connection_factory.cc | 5 +- .../peerconnection/peer_connection_factory.h | 3 +- 
.../native_api/stacktrace/stacktrace.cc | 8 +- .../sdk/android/src/jni/android_metrics.cc | 4 +- .../src/jni/android_network_monitor.cc | 16 +- .../android/src/jni/android_network_monitor.h | 2 +- .../src/jni/android_video_track_source.cc | 8 +- .../src/jni/android_video_track_source.h | 2 +- .../sdk/android/src/jni/audio_device/DEPS | 4 + .../src/jni/audio_device/aaudio_player.cc | 34 +- .../src/jni/audio_device/aaudio_player.h | 4 +- .../src/jni/audio_device/aaudio_recorder.cc | 42 +- .../src/jni/audio_device/aaudio_recorder.h | 4 +- .../src/jni/audio_device/aaudio_wrapper.cc | 52 +- .../jni/audio_device/audio_device_module.cc | 164 +- .../src/jni/audio_device/audio_record_jni.cc | 50 +- .../src/jni/audio_device/audio_record_jni.h | 15 +- .../src/jni/audio_device/audio_track_jni.cc | 30 +- .../src/jni/audio_device/audio_track_jni.h | 12 +- .../src/jni/audio_device/opensles_common.cc | 5 +- .../src/jni/audio_device/opensles_player.h | 4 +- .../src/jni/audio_device/opensles_recorder.h | 4 +- .../webrtc/sdk/android/src/jni/dav1d_codec.cc | 25 + .../android/src/jni/jni_generator_helper.cc | 8 +- .../android/src/jni/jni_generator_helper.h | 14 +- .../voip/webrtc/sdk/android/src/jni/jvm.cc | 6 +- .../jni/voip/webrtc/sdk/android/src/jni/jvm.h | 2 +- .../jni/{av1_codec.cc => libaom_av1_codec.cc} | 5 +- .../sdk/android/src/jni/logging/log_sink.cc | 2 +- .../sdk/android/src/jni/pc/media_stream.cc | 37 +- .../sdk/android/src/jni/pc/media_stream.h | 4 +- .../src/jni/pc/owned_factory_and_threads.cc | 4 +- .../src/jni/pc/owned_factory_and_threads.h | 5 + .../sdk/android/src/jni/pc/peer_connection.cc | 58 +- .../sdk/android/src/jni/pc/peer_connection.h | 8 + .../src/jni/pc/peer_connection_factory.cc | 34 +- .../src/jni/pc/peer_connection_factory.h | 3 +- .../rtc_stats_collector_callback_wrapper.cc | 19 +- .../sdk/android/src/jni/pc/rtp_receiver.cc | 5 +- .../sdk/android/src/jni/pc/rtp_receiver.h | 2 +- .../sdk/android/src/jni/pc/rtp_sender.cc | 5 +- .../sdk/android/src/jni/pc/rtp_transceiver.h | 2 +- .../sdk/android/src/jni/video_codec_info.cc | 19 +- .../android/src/jni/video_decoder_wrapper.cc | 25 +- .../android/src/jni/video_decoder_wrapper.h | 11 +- .../android/src/jni/video_encoder_wrapper.cc | 86 +- .../android/src/jni/video_encoder_wrapper.h | 15 +- .../webrtc/sdk/android/src/jni/video_frame.cc | 99 +- .../webrtc/sdk/android/src/jni/video_frame.h | 48 +- .../jni/voip/webrtc/sdk/media_constraints.cc | 33 +- .../jni/voip/webrtc/sdk/media_constraints.h | 14 +- .../jni/voip/webrtc/stats/rtc_stats.cc | 42 +- .../jni/voip/webrtc/stats/rtcstats_objects.cc | 60 +- .../webrtc/system_wrappers/include/clock.h | 14 +- .../include/denormal_disabler.h | 54 + .../webrtc/system_wrappers/include/metrics.h | 16 +- .../webrtc/system_wrappers/include/ntp_time.h | 12 +- .../include/rtp_to_ntp_estimator.h | 4 +- .../webrtc/system_wrappers/source/clock.cc | 21 +- .../system_wrappers/source/cpu_features.cc | 2 +- .../source/cpu_features_linux.cc | 1 + .../webrtc/system_wrappers/source/cpu_info.cc | 2 +- .../source/denormal_disabler.cc | 107 + .../system_wrappers/source/field_trial.cc | 2 +- .../webrtc/system_wrappers/source/metrics.cc | 13 +- .../source/rtp_to_ntp_estimator.cc | 2 +- .../jni/voip/webrtc/video/adaptation/OWNERS | 2 +- .../video/adaptation/balanced_constraint.cc | 8 +- .../bandwidth_quality_scaler_resource.cc | 85 + .../bandwidth_quality_scaler_resource.h | 64 + .../video/adaptation/overuse_frame_detector.h | 6 +- .../video/adaptation/pixel_limit_resource.cc | 12 +- 
.../quality_rampup_experiment_helper.cc | 18 +- .../quality_rampup_experiment_helper.h | 7 +- .../video_stream_encoder_resource.cc | 3 +- .../video_stream_encoder_resource_manager.cc | 120 +- .../video_stream_encoder_resource_manager.h | 23 +- .../voip/webrtc/video/alignment_adjuster.cc | 8 +- .../voip/webrtc/video/alignment_adjuster.h | 10 +- .../webrtc/video/buffered_frame_decryptor.cc | 5 - .../jni/voip/webrtc/video/call_stats.cc | 6 +- .../jni/voip/webrtc/video/call_stats.h | 18 +- .../jni/voip/webrtc/video/call_stats2.cc | 1 - .../jni/voip/webrtc/video/call_stats2.h | 10 +- .../voip/webrtc/video/cpu_scaling_tests.cc | 4 +- .../voip/webrtc/video/decode_synchronizer.cc | 186 + .../voip/webrtc/video/decode_synchronizer.h | 137 + .../webrtc/video/encoder_bitrate_adjuster.cc | 2 +- .../video/encoder_overshoot_detector.cc | 4 +- .../webrtc/video/encoder_overshoot_detector.h | 6 +- .../webrtc/video/encoder_rtcp_feedback.cc | 64 +- .../voip/webrtc/video/encoder_rtcp_feedback.h | 32 +- .../voip/webrtc/video/frame_buffer_proxy.cc | 574 +++ .../voip/webrtc/video/frame_buffer_proxy.h | 68 + .../webrtc/video/frame_cadence_adapter.cc | 744 +++ .../voip/webrtc/video/frame_cadence_adapter.h | 117 + .../webrtc/video/frame_decode_scheduler.h | 51 + .../voip/webrtc/video/frame_decode_timing.cc | 57 + .../voip/webrtc/video/frame_decode_timing.h | 53 + .../webrtc/video/frame_dumping_decoder.cc | 10 +- .../video/frame_encode_metadata_writer.cc | 73 +- .../video/frame_encode_metadata_writer.h | 6 +- .../voip/webrtc/video/pc_full_stack_tests.cc | 378 +- .../jni/voip/webrtc/video/picture_id_tests.cc | 8 +- .../video/quality_limitation_reason_tracker.h | 10 +- .../webrtc/video/quality_scaling_tests.cc | 109 +- .../webrtc/video/receive_statistics_proxy.cc | 26 +- .../webrtc/video/receive_statistics_proxy.h | 13 +- .../webrtc/video/receive_statistics_proxy2.cc | 136 +- .../webrtc/video/receive_statistics_proxy2.h | 4 +- .../webrtc/video/rtp_streams_synchronizer.h | 4 +- .../webrtc/video/rtp_streams_synchronizer2.h | 4 +- .../webrtc/video/rtp_video_stream_receiver.cc | 67 +- .../webrtc/video/rtp_video_stream_receiver.h | 28 +- .../video/rtp_video_stream_receiver2.cc | 148 +- .../webrtc/video/rtp_video_stream_receiver2.h | 137 +- ...eam_receiver_frame_transformer_delegate.cc | 11 +- ...ream_receiver_frame_transformer_delegate.h | 6 +- .../webrtc/video/send_statistics_proxy.cc | 38 +- .../voip/webrtc/video/send_statistics_proxy.h | 1 + .../jni/voip/webrtc/video/stats_counter.cc | 14 +- .../jni/voip/webrtc/video/stats_counter.h | 52 +- .../webrtc/video/stream_synchronization.h | 6 +- .../task_queue_frame_decode_scheduler.cc | 76 + .../video/task_queue_frame_decode_scheduler.h | 48 + .../video/test/mock_video_stream_encoder.h | 1 - .../jni/voip/webrtc/video/video_analyzer.cc | 19 +- .../jni/voip/webrtc/video/video_analyzer.h | 4 +- .../webrtc/video/video_quality_observer2.h | 2 +- .../voip/webrtc/video/video_quality_test.cc | 18 +- .../voip/webrtc/video/video_receive_stream.cc | 797 ---- .../voip/webrtc/video/video_receive_stream.h | 237 - .../webrtc/video/video_receive_stream2.cc | 345 +- .../voip/webrtc/video/video_receive_stream2.h | 104 +- .../video_receive_stream_timeout_tracker.cc | 81 + .../video_receive_stream_timeout_tracker.h | 65 + .../voip/webrtc/video/video_send_stream.cc | 197 +- .../jni/voip/webrtc/video/video_send_stream.h | 28 +- .../webrtc/video/video_send_stream_impl.cc | 305 +- .../webrtc/video/video_send_stream_impl.h | 88 +- .../webrtc/video/video_send_stream_tests.cc | 625 +-- 
.../video/video_source_sink_controller.cc | 11 +- .../video/video_source_sink_controller.h | 7 +- .../voip/webrtc/video/video_stream_decoder.cc | 2 +- .../webrtc/video/video_stream_decoder2.cc | 2 +- .../webrtc/video/video_stream_decoder_impl.cc | 105 +- .../webrtc/video/video_stream_decoder_impl.h | 19 +- .../voip/webrtc/video/video_stream_encoder.cc | 677 +-- .../voip/webrtc/video/video_stream_encoder.h | 110 +- TMessagesProj/src/main/AndroidManifest.xml | 5 - .../src/main/assets/bluebubbles.attheme | 2 +- .../src/main/assets/darkblue.attheme | 46 +- TMessagesProj/src/main/assets/night.attheme | 58 +- .../widget/LinearLayoutManager.java | 18 +- .../recyclerview/widget/RecyclerView.java | 11 +- .../telegram/messenger/AndroidUtilities.java | 257 +- .../telegram/messenger/ApplicationLoader.java | 25 +- .../org/telegram/messenger/BuildVars.java | 4 +- .../telegram/messenger/CharacterCompat.java | 22 + .../org/telegram/messenger/ChatObject.java | 65 +- .../messenger/ContactsController.java | 4 +- .../telegram/messenger/FileLoadOperation.java | 2 +- .../org/telegram/messenger/FileLoader.java | 56 +- .../messenger/FingerprintController.java | 141 + .../org/telegram/messenger/ImageLoader.java | 35 +- .../org/telegram/messenger/ImageReceiver.java | 5 +- .../java/org/telegram/messenger/Intro.java | 12 +- .../telegram/messenger/LocaleController.java | 32 + .../messenger/MediaDataController.java | 14 +- .../org/telegram/messenger/MessageObject.java | 7 + .../messenger/MessagesController.java | 23 +- .../telegram/messenger/MessagesStorage.java | 628 ++- .../messenger/NotificationCenter.java | 5 + .../telegram/messenger/OneUIUtilities.java | 22 + .../org/telegram/messenger/SharedConfig.java | 53 +- .../org/telegram/messenger/UserConfig.java | 3 +- .../messenger/camera/CameraController.java | 12 +- .../messenger/camera/CameraSession.java | 1 + .../telegram/messenger/camera/CameraView.java | 15 +- .../messenger/voip/NativeInstance.java | 15 +- .../telegram/messenger/voip/VoIPService.java | 58 +- .../main/java/org/telegram/tgnet/TLRPC.java | 156 +- .../org/telegram/ui/ActionBar/ActionBar.java | 51 +- .../ui/ActionBar/ActionBarLayout.java | 93 +- .../telegram/ui/ActionBar/ActionBarMenu.java | 19 +- .../telegram/ui/ActionBar/AlertDialog.java | 14 +- .../telegram/ui/ActionBar/BaseFragment.java | 47 +- .../telegram/ui/ActionBar/BottomSheet.java | 105 +- .../java/org/telegram/ui/ActionBar/Theme.java | 222 +- .../org/telegram/ui/ActionIntroActivity.java | 166 +- .../telegram/ui/Adapters/DialogsAdapter.java | 193 +- .../org/telegram/ui/Adapters/FiltersView.java | 1 - .../telegram/ui/Adapters/MentionsAdapter.java | 2 + .../java/org/telegram/ui/ArticleViewer.java | 39 +- .../telegram/ui/BasePermissionsActivity.java | 120 + .../telegram/ui/BlurSettingsBottomSheet.java | 137 + .../java/org/telegram/ui/BubbleActivity.java | 81 +- .../org/telegram/ui/CacheControlActivity.java | 149 +- .../org/telegram/ui/CameraScanActivity.java | 573 ++- .../ui/CancelAccountDeletionActivity.java | 1185 ----- .../telegram/ui/Cells/ChatMessageCell.java | 340 +- .../org/telegram/ui/Cells/CheckBoxCell.java | 17 +- .../org/telegram/ui/Cells/DialogCell.java | 52 +- .../telegram/ui/Cells/DialogsEmptyCell.java | 399 +- .../org/telegram/ui/Cells/LanguageCell.java | 48 +- .../telegram/ui/Cells/SharedAudioCell.java | 2 + .../telegram/ui/Cells/SharedDocumentCell.java | 13 +- .../ui/Cells/SharedPhotoVideoCell2.java | 8 - .../java/org/telegram/ui/Cells/TextCell.java | 13 + .../ui/Cells/TextSelectionHelper.java | 10 +- 
.../telegram/ui/Cells/TextSettingsCell.java | 6 +- .../org/telegram/ui/ChangeBioActivity.java | 3 +- .../org/telegram/ui/ChangeNameActivity.java | 6 +- .../org/telegram/ui/ChangePhoneActivity.java | 9 +- .../telegram/ui/ChangeUsernameActivity.java | 3 +- .../telegram/ui/ChannelAdminLogActivity.java | 4 +- .../telegram/ui/ChannelCreateActivity.java | 3 +- .../java/org/telegram/ui/ChatActivity.java | 454 +- .../org/telegram/ui/ChatUsersActivity.java | 2 - .../org/telegram/ui/CodeFieldContainer.java | 45 +- .../java/org/telegram/ui/CodeNumberField.java | 115 +- .../telegram/ui/Components/AlertsCreator.java | 76 +- .../AnimatedPhoneNumberEditText.java | 273 ++ .../ui/Components/AvatarDrawable.java | 5 +- .../ui/Components/AvatarsDarawable.java | 3 + .../ui/Components/BlockingUpdateView.java | 15 +- ...ameLayout.java => BlurredFrameLayout.java} | 25 +- .../ui/Components/BlurredLinearLayout.java | 75 + .../ui/Components/BlurredRecyclerView.java | 80 + .../ui/Components/BottomPagesView.java | 6 +- .../ui/Components/ChatActivityEnterView.java | 677 +-- .../ui/Components/ChatAttachAlert.java | 621 ++- .../ChatAttachAlertAudioLayout.java | 2 +- .../ChatAttachAlertContactsLayout.java | 2 +- .../ChatAttachAlertDocumentLayout.java | 20 +- .../ChatAttachAlertLocationLayout.java | 37 +- .../ChatAttachAlertPhotoLayout.java | 169 +- .../ChatAttachAlertPhotoLayoutPreview.java | 2068 +++++++++ .../Components/ChatAttachAlertPollLayout.java | 2 +- .../telegram/ui/Components/CheckBoxBase.java | 5 +- .../ui/Components/CheckBoxSquare.java | 4 +- .../telegram/ui/Components/ChevronView.java | 69 + .../Components/CustomPhoneKeyboardView.java | 277 ++ .../ui/Components/EditTextBoldCursor.java | 160 +- .../telegram/ui/Components/EditTextEmoji.java | 7 +- .../org/telegram/ui/Components/EmojiView.java | 6 +- .../ui/Components/EmptyTextProgressView.java | 38 +- .../ui/Components/FlickerLoadingView.java | 100 +- .../ui/Components/FragmentContextView.java | 44 +- .../FragmentContextViewWavesDrawable.java | 4 +- .../GroupCallFullscreenAdapter.java | 16 +- .../telegram/ui/Components/GroupCallPip.java | 4 + .../ui/Components/GroupCallPipAlertView.java | 2 +- .../telegram/ui/Components/HintEditText.java | 56 +- .../telegram/ui/Components/ImageUpdater.java | 62 +- .../ui/Components/MediaActionDrawable.java | 66 +- .../telegram/ui/Components/MediaActivity.java | 38 +- .../Components/OutlineTextContainerView.java | 170 + .../ui/Components/PagerSlidingTabStrip.java | 13 +- .../telegram/ui/Components/PasscodeView.java | 86 +- .../ui/Components/PollVotesAlert.java | 2 +- .../ui/Components/RLottieDrawable.java | 26 +- .../ui/Components/RLottieImageView.java | 9 + .../ui/Components/RadialProgress2.java | 7 +- .../ui/Components/RadialProgressView.java | 23 +- .../ui/Components/ReactedUsersListView.java | 11 +- .../Reactions/ReactionsEffectOverlay.java | 2 +- .../Reactions/ReactionsLayoutInBubble.java | 40 +- .../Components/ReactionsContainerLayout.java | 2 +- .../RecyclerAnimationScrollHelper.java | 3 +- .../ui/Components/RecyclerListView.java | 113 +- .../telegram/ui/Components/ReportAlert.java | 3 +- .../Components/SearchDownloadsContainer.java | 589 +++ .../ui/Components/SearchViewPager.java | 237 +- .../ui/Components/SenderSelectPopup.java | 400 ++ .../ui/Components/SenderSelectView.java | 167 +- .../ui/Components/SharedMediaLayout.java | 123 +- .../ui/Components/SimpleAvatarView.java | 25 +- .../Components/SimpleFloatPropertyCompat.java | 38 + .../Components/SizeNotifierFrameLayout.java | 216 +- 
.../org/telegram/ui/Components/SlideView.java | 8 +- .../ui/Components/StickerEmptyView.java | 43 +- .../ui/Components/StickerImageView.java | 8 +- .../telegram/ui/Components/StickersAlert.java | 3 +- .../ui/Components/TermsOfServiceView.java | 2 +- .../ui/Components/ThemeEditorView.java | 3 +- .../TransformableLoginButtonView.java | 188 + .../org/telegram/ui/Components/UndoView.java | 62 +- .../VerticalPositionAutoAnimator.java | 43 +- .../ui/Components/ViewPagerFixed.java | 48 +- .../ui/Components/WallpaperUpdater.java | 3 +- .../ui/Components/spoilers/SpoilerEffect.java | 49 +- .../Components/spoilers/SpoilersTextView.java | 4 +- .../voip/GroupCallMiniTextureView.java | 76 +- .../voip/GroupCallRenderersContainer.java | 100 +- .../Components/voip/RTMPStreamPipOverlay.java | 736 +++ .../ui/Components/voip/VoIPHelper.java | 28 +- .../ui/Components/voip/VoIPTextureView.java | 29 + .../org/telegram/ui/ContactAddActivity.java | 12 +- .../org/telegram/ui/ContactsActivity.java | 69 +- .../telegram/ui/CountrySelectActivity.java | 144 +- .../org/telegram/ui/DataUsageActivity.java | 2 +- .../java/org/telegram/ui/DialogsActivity.java | 499 ++- .../org/telegram/ui/DownloadProgressIcon.java | 235 + .../org/telegram/ui/FilterCreateActivity.java | 4 +- .../org/telegram/ui/FilteredSearchView.java | 8 +- .../org/telegram/ui/FiltersSetupActivity.java | 2 +- .../org/telegram/ui/GroupCallActivity.java | 477 +- .../telegram/ui/GroupCreateFinalActivity.java | 2 +- .../java/org/telegram/ui/IntroActivity.java | 470 +- .../java/org/telegram/ui/LaunchActivity.java | 286 +- .../org/telegram/ui/LocationActivity.java | 8 +- .../java/org/telegram/ui/LoginActivity.java | 3973 ++++++++++++----- .../java/org/telegram/ui/LogoutActivity.java | 8 +- .../telegram/ui/MediaCalendarActivity.java | 642 --- .../java/org/telegram/ui/MessageSeenView.java | 14 +- .../org/telegram/ui/NewContactActivity.java | 17 +- .../org/telegram/ui/PasscodeActivity.java | 1386 ++++-- .../org/telegram/ui/PassportActivity.java | 12 +- .../java/org/telegram/ui/PhotoViewer.java | 11 +- .../ui/PopupNotificationActivity.java | 2 +- .../telegram/ui/PrivacyControlActivity.java | 2 +- .../telegram/ui/PrivacySettingsActivity.java | 18 +- .../java/org/telegram/ui/ProfileActivity.java | 479 +- .../ui/ReactionsDoubleTapManageActivity.java | 8 +- .../org/telegram/ui/SessionsActivity.java | 126 +- .../ui/SuggestClearDatabaseBottomSheet.java | 116 + .../ui/TextMessageEnterTransition.java | 3 +- .../java/org/telegram/ui/ThemeActivity.java | 31 +- .../ui/TwoStepVerificationActivity.java | 333 +- .../ui/TwoStepVerificationSetupActivity.java | 1443 +++--- .../telegram/ui/WallpapersListActivity.java | 4 +- .../java/org/webrtc/TextureViewRenderer.java | 8 +- .../main/java/org/webrtc/VideoEncoder.java | 83 +- .../src/main/java/org/webrtc/VideoFrame.java | 10 + TMessagesProj/src/main/res/anim/alpha_in.xml | 4 + TMessagesProj/src/main/res/anim/alpha_out.xml | 4 + TMessagesProj/src/main/res/anim/text_in.xml | 5 + TMessagesProj/src/main/res/anim/text_out.xml | 5 + .../src/main/res/anim/text_out_down.xml | 5 + .../res/drawable-hdpi/attach_arrow_left.png | Bin 0 -> 357 bytes .../res/drawable-hdpi/attach_arrow_right.png | Bin 0 -> 338 bytes .../main/res/drawable-hdpi/intro_tg_plane.png | Bin 2200 -> 2792 bytes .../res/drawable-hdpi/intro_tg_sphere.png | Bin 2300 -> 0 bytes .../src/main/res/drawable-hdpi/menu_clear.png | Bin 0 -> 749 bytes .../main/res/drawable-hdpi/msg_inputarrow.png | Bin 0 -> 422 bytes .../main/res/drawable-hdpi/msg_pin_code.png | Bin 0 -> 700 bytes 
.../main/res/drawable-hdpi/msg_voiceclose.png | Bin 0 -> 440 bytes .../res/drawable-hdpi/pip_video_close.png | Bin 0 -> 589 bytes .../res/drawable-hdpi/pip_video_expand.png | Bin 0 -> 679 bytes .../main/res/drawable-hdpi/voice_expand.png | Bin 0 -> 887 bytes .../main/res/drawable-hdpi/voice_minimize.png | Bin 0 -> 896 bytes .../main/res/drawable-mdpi/intro_tg_plane.png | Bin 1523 -> 1759 bytes .../res/drawable-mdpi/intro_tg_sphere.png | Bin 1523 -> 0 bytes .../src/main/res/drawable-mdpi/menu_clear.png | Bin 0 -> 602 bytes .../main/res/drawable-mdpi/msg_inputarrow.png | Bin 0 -> 305 bytes .../main/res/drawable-mdpi/msg_pin_code.png | Bin 0 -> 541 bytes .../main/res/drawable-mdpi/msg_voiceclose.png | Bin 0 -> 394 bytes .../res/drawable-mdpi/pip_video_close.png | Bin 0 -> 442 bytes .../res/drawable-mdpi/pip_video_expand.png | Bin 0 -> 537 bytes .../main/res/drawable-mdpi/voice_expand.png | Bin 0 -> 618 bytes .../main/res/drawable-mdpi/voice_minimize.png | Bin 0 -> 687 bytes .../res/drawable-xhdpi/attach_arrow_left.png | Bin 0 -> 445 bytes .../res/drawable-xhdpi/attach_arrow_right.png | Bin 0 -> 441 bytes .../res/drawable-xhdpi/intro_tg_plane.png | Bin 2938 -> 3973 bytes .../res/drawable-xhdpi/intro_tg_sphere.png | Bin 3124 -> 0 bytes .../main/res/drawable-xhdpi/menu_clear.png | Bin 0 -> 1024 bytes .../res/drawable-xhdpi/msg_inputarrow.png | Bin 0 -> 448 bytes .../main/res/drawable-xhdpi/msg_pin_code.png | Bin 0 -> 962 bytes .../res/drawable-xhdpi/msg_voiceclose.png | Bin 0 -> 602 bytes .../res/drawable-xhdpi/pip_video_close.png | Bin 0 -> 677 bytes .../res/drawable-xhdpi/pip_video_expand.png | Bin 0 -> 823 bytes .../main/res/drawable-xhdpi/voice_expand.png | Bin 0 -> 1151 bytes .../res/drawable-xhdpi/voice_minimize.png | Bin 0 -> 1162 bytes .../res/drawable-xxhdpi/attach_arrow_left.png | Bin 0 -> 618 bytes .../drawable-xxhdpi/attach_arrow_right.png | Bin 0 -> 611 bytes .../res/drawable-xxhdpi/intro_tg_plane.png | Bin 4368 -> 6445 bytes .../res/drawable-xxhdpi/intro_tg_sphere.png | Bin 4406 -> 0 bytes .../main/res/drawable-xxhdpi/menu_clear.png | Bin 0 -> 1607 bytes .../res/drawable-xxhdpi/msg_inputarrow.png | Bin 0 -> 713 bytes .../main/res/drawable-xxhdpi/msg_pin_code.png | Bin 0 -> 1223 bytes .../res/drawable-xxhdpi/msg_voiceclose.png | Bin 0 -> 736 bytes .../res/drawable-xxhdpi/pip_video_close.png | Bin 0 -> 997 bytes .../res/drawable-xxhdpi/pip_video_expand.png | Bin 0 -> 1192 bytes .../main/res/drawable-xxhdpi/voice_expand.png | Bin 0 -> 1710 bytes .../res/drawable-xxhdpi/voice_minimize.png | Bin 0 -> 1752 bytes TMessagesProj/src/main/res/raw/camera.json | 2 +- .../src/main/res/raw/camera_wait.json | 1 + .../src/main/res/raw/code_laptop.json | 1 + .../src/main/res/raw/download_finish.json | 1 + .../src/main/res/raw/download_progress.json | 1 + .../src/main/res/raw/media_forbidden.json | 1 + .../main/res/raw/permission_request_apk.json | 1 + .../res/raw/permission_request_camera.json | 1 + .../res/raw/permission_request_contacts.json | 1 + .../res/raw/permission_request_folder.json | 1 + .../res/raw/permission_request_location.json | 1 + .../raw/permission_request_microphone.json | 1 + .../src/main/res/raw/phone_dots.json | 1 + .../src/main/res/raw/phone_dots_to_stars.json | 1 + .../src/main/res/raw/phone_flash_call.json | 1 + .../src/main/res/raw/phone_stars_to_dots.json | 1 + TMessagesProj/src/main/res/raw/sandclock.json | 1 + .../src/main/res/raw/sun_outline.tgs | 2 +- .../src/main/res/raw/utyan_newborn.json | 1 + .../src/main/res/raw/utyan_passcode.tgs | 1 + 
TMessagesProj/src/main/res/values/ids.xml | 3 + TMessagesProj/src/main/res/values/strings.xml | 131 +- 2625 files changed, 166215 insertions(+), 59265 deletions(-) create mode 100644 TMessagesProj/jni/voip/tgcalls/group/AVIOContextImpl.cpp create mode 100644 TMessagesProj/jni/voip/tgcalls/group/AVIOContextImpl.h create mode 100644 TMessagesProj/jni/voip/tgcalls/group/AudioStreamingPartInternal.cpp create mode 100644 TMessagesProj/jni/voip/tgcalls/group/AudioStreamingPartInternal.h create mode 100644 TMessagesProj/jni/voip/tgcalls/group/AudioStreamingPartPersistentDecoder.cpp create mode 100644 TMessagesProj/jni/voip/tgcalls/group/AudioStreamingPartPersistentDecoder.h delete mode 100644 TMessagesProj/jni/voip/tgcalls/reference/InstanceImplReference.cpp delete mode 100644 TMessagesProj/jni/voip/tgcalls/reference/InstanceImplReference.h delete mode 100644 TMessagesProj/jni/voip/webrtc/absl/base/dynamic_annotations.cc delete mode 100644 TMessagesProj/jni/voip/webrtc/absl/base/internal/bits.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/base/internal/dynamic_annotations.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/base/internal/thread_annotations.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/cleanup/cleanup.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/cleanup/internal/cleanup.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/container/btree_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/container/btree_test.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/container/internal/compressed_tuple_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/container/internal/container_memory_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/container/internal/hash_function_defaults_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/container/internal/hashtablez_sampler_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/container/internal/layout_benchmark.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/container/internal/layout_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/container/internal/node_hash_policy_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/container/internal/raw_hash_set_allocator_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/container/internal/raw_hash_set_benchmark.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/container/internal/raw_hash_set_probe_benchmark.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/container/internal/raw_hash_set_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/container/internal/test_instance_tracker_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/container/internal/unordered_map_constructor_test.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/container/internal/unordered_map_lookup_test.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/container/internal/unordered_map_members_test.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/container/internal/unordered_map_modifiers_test.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/container/internal/unordered_map_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/container/internal/unordered_set_constructor_test.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/container/internal/unordered_set_lookup_test.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/container/internal/unordered_set_members_test.h create mode 100644 
TMessagesProj/jni/voip/webrtc/absl/container/internal/unordered_set_modifiers_test.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/container/internal/unordered_set_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/debugging/internal/demangle_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/debugging/internal/stack_consumption_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/debugging/internal/stacktrace_emscripten-inl.inc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/debugging/internal/stacktrace_riscv-inl.inc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/debugging/symbolize_darwin.inc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/debugging/symbolize_emscripten.inc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/flags/commandlineflag.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/flags/commandlineflag.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/flags/flag_benchmark.lds create mode 100644 TMessagesProj/jni/voip/webrtc/absl/flags/internal/path_util_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/flags/internal/private_handle_accessor.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/flags/internal/private_handle_accessor.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/flags/internal/program_name_test.cc delete mode 100644 TMessagesProj/jni/voip/webrtc/absl/flags/internal/registry.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/flags/internal/sequence_lock.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/flags/internal/sequence_lock_test.cc delete mode 100644 TMessagesProj/jni/voip/webrtc/absl/flags/internal/type_erased.cc delete mode 100644 TMessagesProj/jni/voip/webrtc/absl/flags/internal/type_erased.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/flags/internal/usage_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/flags/reflection.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/flags/reflection.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/hash/internal/low_level_hash.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/hash/internal/low_level_hash.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/numeric/bits.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/numeric/int128_stream_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/numeric/int128_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/numeric/internal/bits.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/numeric/internal/representation.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/profiling/internal/exponential_biased.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/profiling/internal/exponential_biased.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/profiling/internal/periodic_sampler.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/profiling/internal/periodic_sampler.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/profiling/internal/sample_recorder.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/profiling/internal/sample_recorder_test.cc delete mode 100644 TMessagesProj/jni/voip/webrtc/absl/random/internal/distributions.h delete mode 100644 TMessagesProj/jni/voip/webrtc/absl/random/internal/mocking_bit_gen_base.h delete mode 100644 TMessagesProj/jni/voip/webrtc/absl/random/internal/randen-keys.inc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/random/internal/randen_round_keys.cc create mode 100644 
TMessagesProj/jni/voip/webrtc/absl/status/internal/status_internal.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/status/internal/statusor_internal.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/status/statusor.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/status/statusor.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/ascii_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/charconv_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/cord_analysis.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/cord_analysis.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/cord_ring_reader_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/cord_ring_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/cord_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/cordz_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/cordz_test_helpers.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/escaping_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/cord_data_edge.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/cord_internal.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/cord_rep_btree.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/cord_rep_btree.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/cord_rep_btree_navigator.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/cord_rep_btree_navigator.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/cord_rep_btree_reader.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/cord_rep_btree_reader.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/cord_rep_consume.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/cord_rep_consume.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/cord_rep_crc.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/cord_rep_crc.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/cord_rep_flat.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/cord_rep_ring.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/cord_rep_ring.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/cord_rep_ring_reader.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/cord_rep_test_util.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/cordz_functions.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/cordz_functions.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/cordz_functions_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/cordz_handle.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/cordz_handle.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/cordz_handle_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/cordz_info.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/cordz_info.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/cordz_info_statistics_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/cordz_info_test.cc create mode 
100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/cordz_sample_token.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/cordz_sample_token.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/cordz_sample_token_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/cordz_statistics.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/cordz_update_scope.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/cordz_update_scope_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/cordz_update_tracker.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/cordz_update_tracker_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/memutil_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/numbers_test_common.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/ostringstream_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/pow10_helper_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/resize_uninitialized_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/str_format/arg_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/str_format/bind_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/str_format/checker_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/str_format/convert_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/str_format/extension_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/str_format/output_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/str_format/parser_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/string_constant.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/string_constant_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/internal/utf8_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/match_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/numbers_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/str_cat_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/str_format_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/str_join_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/str_replace_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/str_split_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/string_view_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/strip_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/strings/substitute_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/synchronization/barrier_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/synchronization/blocking_counter_benchmark.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/synchronization/blocking_counter_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/synchronization/internal/futex.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/synchronization/internal/graphcycles_test.cc delete mode 100644 TMessagesProj/jni/voip/webrtc/absl/synchronization/internal/mutex_nonprod.cc delete mode 100644 
TMessagesProj/jni/voip/webrtc/absl/synchronization/internal/mutex_nonprod.inc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/synchronization/internal/per_thread_sem_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/synchronization/lifetime_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/synchronization/mutex_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/synchronization/notification_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/time/civil_time_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/time/clock_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/time/duration_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/time/format_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/time/internal/cctz/src/civil_time_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/time/internal/cctz/src/time_zone_format_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/time/internal/cctz/src/time_zone_lookup_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/time/time_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/time/time_zone_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/types/any_exception_safety_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/types/any_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/types/compare_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/types/internal/conformance_testing.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/types/internal/conformance_testing_helpers.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/types/internal/conformance_testing_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/types/internal/parentheses.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/types/internal/transform_args.h create mode 100644 TMessagesProj/jni/voip/webrtc/absl/types/optional_exception_safety_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/types/optional_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/types/span_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/types/variant_exception_safety_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/types/variant_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/absl/utility/utility_test.cc create mode 100644 TMessagesProj/jni/voip/webrtc/api/metronome/metronome.h delete mode 100644 TMessagesProj/jni/voip/webrtc/api/neteq/OWNERS delete mode 100644 TMessagesProj/jni/voip/webrtc/api/stats/OWNERS create mode 100644 TMessagesProj/jni/voip/webrtc/api/test/mock_audio_sink.h create mode 100644 TMessagesProj/jni/voip/webrtc/api/test/mock_video_track.h create mode 100644 TMessagesProj/jni/voip/webrtc/api/test/peer_network_dependencies.h create mode 100644 TMessagesProj/jni/voip/webrtc/api/video/i444_buffer.cc create mode 100644 TMessagesProj/jni/voip/webrtc/api/video/i444_buffer.h create mode 100644 TMessagesProj/jni/voip/webrtc/api/video/render_resolution.h create mode 100644 TMessagesProj/jni/voip/webrtc/api/video/rtp_video_frame_assembler.cc create mode 100644 TMessagesProj/jni/voip/webrtc/api/video/rtp_video_frame_assembler.h delete mode 100644 TMessagesProj/jni/voip/webrtc/api/video/test/mock_recordable_encoded_frame.h delete mode 100644 TMessagesProj/jni/voip/webrtc/api/video_codecs/OWNERS create mode 100644 TMessagesProj/jni/voip/webrtc/api/video_track_source_constraints.h create mode 100644 
TMessagesProj/jni/voip/webrtc/api/video_track_source_proxy_factory.h create mode 100644 TMessagesProj/jni/voip/webrtc/api/wrapping_async_dns_resolver.cc create mode 100644 TMessagesProj/jni/voip/webrtc/api/wrapping_async_dns_resolver.h delete mode 100644 TMessagesProj/jni/voip/webrtc/audio/OWNERS delete mode 100644 TMessagesProj/jni/voip/webrtc/audio/voip/test/mock_task_queue.h create mode 100644 TMessagesProj/jni/voip/webrtc/call/receive_stream.h create mode 100644 TMessagesProj/jni/voip/webrtc/call/rtp_transport_config.h create mode 100644 TMessagesProj/jni/voip/webrtc/call/rtp_transport_controller_send_factory.h create mode 100644 TMessagesProj/jni/voip/webrtc/call/rtp_transport_controller_send_factory_interface.h delete mode 100644 TMessagesProj/jni/voip/webrtc/common_video/OWNERS create mode 100644 TMessagesProj/jni/voip/webrtc/common_video/framerate_controller.cc create mode 100644 TMessagesProj/jni/voip/webrtc/common_video/framerate_controller.h create mode 100644 TMessagesProj/jni/voip/webrtc/common_video/h265/legacy_bit_buffer.cc create mode 100644 TMessagesProj/jni/voip/webrtc/common_video/h265/legacy_bit_buffer.h create mode 100644 TMessagesProj/jni/voip/webrtc/logging/rtc_event_log/encoder/bit_writer.cc create mode 100644 TMessagesProj/jni/voip/webrtc/logging/rtc_event_log/encoder/bit_writer.h create mode 100644 TMessagesProj/jni/voip/webrtc/logging/rtc_event_log/encoder/rtc_event_log_encoder_v3.cc create mode 100644 TMessagesProj/jni/voip/webrtc/logging/rtc_event_log/encoder/rtc_event_log_encoder_v3.h create mode 100644 TMessagesProj/jni/voip/webrtc/logging/rtc_event_log/events/fixed_length_encoding_parameters_v3.cc create mode 100644 TMessagesProj/jni/voip/webrtc/logging/rtc_event_log/events/fixed_length_encoding_parameters_v3.h create mode 100644 TMessagesProj/jni/voip/webrtc/logging/rtc_event_log/events/logged_rtp_rtcp.h create mode 100644 TMessagesProj/jni/voip/webrtc/logging/rtc_event_log/events/rtc_event_begin_log.cc create mode 100644 TMessagesProj/jni/voip/webrtc/logging/rtc_event_log/events/rtc_event_begin_log.h create mode 100644 TMessagesProj/jni/voip/webrtc/logging/rtc_event_log/events/rtc_event_definition.h create mode 100644 TMessagesProj/jni/voip/webrtc/logging/rtc_event_log/events/rtc_event_end_log.cc create mode 100644 TMessagesProj/jni/voip/webrtc/logging/rtc_event_log/events/rtc_event_end_log.h create mode 100644 TMessagesProj/jni/voip/webrtc/logging/rtc_event_log/events/rtc_event_field_encoding.cc create mode 100644 TMessagesProj/jni/voip/webrtc/logging/rtc_event_log/events/rtc_event_field_encoding.h create mode 100644 TMessagesProj/jni/voip/webrtc/logging/rtc_event_log/events/rtc_event_field_encoding_parser.cc create mode 100644 TMessagesProj/jni/voip/webrtc/logging/rtc_event_log/events/rtc_event_field_encoding_parser.h create mode 100644 TMessagesProj/jni/voip/webrtc/logging/rtc_event_log/events/rtc_event_field_extraction.cc create mode 100644 TMessagesProj/jni/voip/webrtc/logging/rtc_event_log/events/rtc_event_field_extraction.h delete mode 100644 TMessagesProj/jni/voip/webrtc/logging/rtc_event_log/logged_events.cc create mode 100644 TMessagesProj/jni/voip/webrtc/logging/rtc_event_log/rtc_event_log_unittest_helper.cc create mode 100644 TMessagesProj/jni/voip/webrtc/logging/rtc_event_log/rtc_event_log_unittest_helper.h delete mode 100644 TMessagesProj/jni/voip/webrtc/media/base/h264_profile_level_id.cc delete mode 100644 TMessagesProj/jni/voip/webrtc/media/base/h264_profile_level_id.h create mode 100644 
TMessagesProj/jni/voip/webrtc/modules/audio_coding/neteq/relative_arrival_delay_tracker.cc create mode 100644 TMessagesProj/jni/voip/webrtc/modules/audio_coding/neteq/relative_arrival_delay_tracker.h create mode 100644 TMessagesProj/jni/voip/webrtc/modules/audio_coding/neteq/reorder_optimizer.cc create mode 100644 TMessagesProj/jni/voip/webrtc/modules/audio_coding/neteq/reorder_optimizer.h create mode 100644 TMessagesProj/jni/voip/webrtc/modules/audio_coding/neteq/underrun_optimizer.cc create mode 100644 TMessagesProj/jni/voip/webrtc/modules/audio_coding/neteq/underrun_optimizer.h create mode 100644 TMessagesProj/jni/voip/webrtc/modules/audio_processing/agc/analog_gain_stats_reporter.cc create mode 100644 TMessagesProj/jni/voip/webrtc/modules/audio_processing/agc/analog_gain_stats_reporter.h create mode 100644 TMessagesProj/jni/voip/webrtc/modules/audio_processing/agc/clipping_predictor.cc create mode 100644 TMessagesProj/jni/voip/webrtc/modules/audio_processing/agc/clipping_predictor.h create mode 100644 TMessagesProj/jni/voip/webrtc/modules/audio_processing/agc/clipping_predictor_evaluator.cc create mode 100644 TMessagesProj/jni/voip/webrtc/modules/audio_processing/agc/clipping_predictor_evaluator.h create mode 100644 TMessagesProj/jni/voip/webrtc/modules/audio_processing/agc/clipping_predictor_level_buffer.cc create mode 100644 TMessagesProj/jni/voip/webrtc/modules/audio_processing/agc/clipping_predictor_level_buffer.h delete mode 100644 TMessagesProj/jni/voip/webrtc/modules/audio_processing/agc2/adaptive_agc.cc create mode 100644 TMessagesProj/jni/voip/webrtc/modules/audio_processing/agc2/adaptive_digital_gain_controller.cc rename TMessagesProj/jni/voip/webrtc/modules/audio_processing/agc2/{adaptive_agc.h => adaptive_digital_gain_controller.h} (57%) delete mode 100644 TMessagesProj/jni/voip/webrtc/modules/audio_processing/agc2/down_sampler.cc delete mode 100644 TMessagesProj/jni/voip/webrtc/modules/audio_processing/agc2/down_sampler.h delete mode 100644 TMessagesProj/jni/voip/webrtc/modules/audio_processing/agc2/noise_spectrum_estimator.cc delete mode 100644 TMessagesProj/jni/voip/webrtc/modules/audio_processing/agc2/noise_spectrum_estimator.h delete mode 100644 TMessagesProj/jni/voip/webrtc/modules/audio_processing/agc2/signal_classifier.cc delete mode 100644 TMessagesProj/jni/voip/webrtc/modules/audio_processing/agc2/signal_classifier.h delete mode 100644 TMessagesProj/jni/voip/webrtc/modules/audio_processing/agc2/vad_with_level.cc delete mode 100644 TMessagesProj/jni/voip/webrtc/modules/audio_processing/agc2/vad_with_level.h create mode 100644 TMessagesProj/jni/voip/webrtc/modules/audio_processing/agc2/vad_wrapper.cc create mode 100644 TMessagesProj/jni/voip/webrtc/modules/audio_processing/agc2/vad_wrapper.h delete mode 100644 TMessagesProj/jni/voip/webrtc/modules/audio_processing/common.h delete mode 100644 TMessagesProj/jni/voip/webrtc/modules/audio_processing/include/config.h delete mode 100644 TMessagesProj/jni/voip/webrtc/modules/audio_processing/level_estimator.cc delete mode 100644 TMessagesProj/jni/voip/webrtc/modules/audio_processing/level_estimator.h delete mode 100644 TMessagesProj/jni/voip/webrtc/modules/audio_processing/voice_detection.cc delete mode 100644 TMessagesProj/jni/voip/webrtc/modules/audio_processing/voice_detection.h create mode 100644 TMessagesProj/jni/voip/webrtc/modules/congestion_controller/goog_cc/loss_based_bwe_v2.cc create mode 100644 TMessagesProj/jni/voip/webrtc/modules/congestion_controller/goog_cc/loss_based_bwe_v2.h delete mode 100644 
TMessagesProj/jni/voip/webrtc/modules/rtp_rtcp/source/absolute_capture_time_receiver.cc delete mode 100644 TMessagesProj/jni/voip/webrtc/modules/rtp_rtcp/source/absolute_capture_time_receiver.h create mode 100644 TMessagesProj/jni/voip/webrtc/modules/rtp_rtcp/source/rtp_util.cc create mode 100644 TMessagesProj/jni/voip/webrtc/modules/rtp_rtcp/source/rtp_util.h delete mode 100644 TMessagesProj/jni/voip/webrtc/modules/rtp_rtcp/source/rtp_utility.cc delete mode 100644 TMessagesProj/jni/voip/webrtc/modules/rtp_rtcp/source/rtp_utility.h create mode 100644 TMessagesProj/jni/voip/webrtc/modules/video_coding/codecs/av1/dav1d_decoder.cc rename TMessagesProj/jni/voip/webrtc/modules/video_coding/codecs/av1/{libaom_av1_encoder_absent.cc => dav1d_decoder.h} (55%) create mode 100644 TMessagesProj/jni/voip/webrtc/modules/video_coding/codecs/av1/libaom_av1_encoder_supported.cc create mode 100644 TMessagesProj/jni/voip/webrtc/modules/video_coding/codecs/av1/libaom_av1_encoder_supported.h delete mode 100644 TMessagesProj/jni/voip/webrtc/modules/video_coding/codecs/vp9/svc_rate_allocator.h create mode 100644 TMessagesProj/jni/voip/webrtc/modules/video_coding/frame_buffer3.cc create mode 100644 TMessagesProj/jni/voip/webrtc/modules/video_coding/frame_buffer3.h create mode 100644 TMessagesProj/jni/voip/webrtc/modules/video_coding/frame_helpers.cc create mode 100644 TMessagesProj/jni/voip/webrtc/modules/video_coding/frame_helpers.h create mode 100644 TMessagesProj/jni/voip/webrtc/modules/video_coding/h264_packet_buffer.cc create mode 100644 TMessagesProj/jni/voip/webrtc/modules/video_coding/h264_packet_buffer.h rename TMessagesProj/jni/voip/webrtc/modules/video_coding/{nack_module2.cc => nack_requester.cc} (73%) rename TMessagesProj/jni/voip/webrtc/modules/video_coding/{nack_module2.h => nack_requester.h} (71%) create mode 100644 TMessagesProj/jni/voip/webrtc/modules/video_coding/utility/bandwidth_quality_scaler.cc create mode 100644 TMessagesProj/jni/voip/webrtc/modules/video_coding/utility/bandwidth_quality_scaler.h rename TMessagesProj/jni/voip/webrtc/modules/video_coding/utility/{framerate_controller.cc => framerate_controller_deprecated.cc} (75%) rename TMessagesProj/jni/voip/webrtc/modules/video_coding/utility/{framerate_controller.h => framerate_controller_deprecated.h} (73%) create mode 100644 TMessagesProj/jni/voip/webrtc/modules/video_coding/utility/ivf_defines.h create mode 100644 TMessagesProj/jni/voip/webrtc/modules/video_coding/utility/vp9_constants.h delete mode 100644 TMessagesProj/jni/voip/webrtc/net/dcsctp/common/pair_hash.h create mode 100644 TMessagesProj/jni/voip/webrtc/net/dcsctp/public/dcsctp_handover_state.cc create mode 100644 TMessagesProj/jni/voip/webrtc/net/dcsctp/public/dcsctp_handover_state.h create mode 100644 TMessagesProj/jni/voip/webrtc/net/dcsctp/public/dcsctp_socket_factory.cc create mode 100644 TMessagesProj/jni/voip/webrtc/net/dcsctp/public/dcsctp_socket_factory.h create mode 100644 TMessagesProj/jni/voip/webrtc/net/dcsctp/public/mock_dcsctp_socket.h create mode 100644 TMessagesProj/jni/voip/webrtc/net/dcsctp/public/text_pcap_packet_observer.cc create mode 100644 TMessagesProj/jni/voip/webrtc/net/dcsctp/public/text_pcap_packet_observer.h create mode 100644 TMessagesProj/jni/voip/webrtc/net/dcsctp/socket/callback_deferrer.cc create mode 100644 TMessagesProj/jni/voip/webrtc/net/dcsctp/socket/packet_sender.cc create mode 100644 TMessagesProj/jni/voip/webrtc/net/dcsctp/socket/packet_sender.h delete mode 100644 TMessagesProj/jni/voip/webrtc/net/dcsctp/tx/fcfs_send_queue.cc 
delete mode 100644 TMessagesProj/jni/voip/webrtc/net/dcsctp/tx/fcfs_send_queue.h create mode 100644 TMessagesProj/jni/voip/webrtc/net/dcsctp/tx/outstanding_data.cc create mode 100644 TMessagesProj/jni/voip/webrtc/net/dcsctp/tx/outstanding_data.h create mode 100644 TMessagesProj/jni/voip/webrtc/net/dcsctp/tx/rr_send_queue.cc create mode 100644 TMessagesProj/jni/voip/webrtc/net/dcsctp/tx/rr_send_queue.h create mode 100644 TMessagesProj/jni/voip/webrtc/pc/jsep_transport_collection.cc create mode 100644 TMessagesProj/jni/voip/webrtc/pc/jsep_transport_collection.h rename TMessagesProj/jni/voip/webrtc/{api => pc}/media_stream_proxy.h (82%) rename TMessagesProj/jni/voip/webrtc/{api => pc}/media_stream_track_proxy.h (78%) rename TMessagesProj/jni/voip/webrtc/{api => pc}/peer_connection_factory_proxy.h (85%) rename TMessagesProj/jni/voip/webrtc/{api => pc}/peer_connection_proxy.h (93%) create mode 100644 TMessagesProj/jni/voip/webrtc/pc/proxy.cc rename TMessagesProj/jni/voip/webrtc/{api => pc}/proxy.h (54%) create mode 100644 TMessagesProj/jni/voip/webrtc/pc/rtp_receiver_proxy.h create mode 100644 TMessagesProj/jni/voip/webrtc/pc/rtp_sender_proxy.h create mode 100644 TMessagesProj/jni/voip/webrtc/pc/video_track_source_proxy.cc rename TMessagesProj/jni/voip/webrtc/{api => pc}/video_track_source_proxy.h (70%) create mode 100644 TMessagesProj/jni/voip/webrtc/rtc_base/bitstream_reader.cc create mode 100644 TMessagesProj/jni/voip/webrtc/rtc_base/bitstream_reader.h create mode 100644 TMessagesProj/jni/voip/webrtc/rtc_base/containers/BUILD.gn create mode 100644 TMessagesProj/jni/voip/webrtc/rtc_base/containers/as_const.h create mode 100644 TMessagesProj/jni/voip/webrtc/rtc_base/containers/flat_map.h create mode 100644 TMessagesProj/jni/voip/webrtc/rtc_base/containers/flat_set.h rename TMessagesProj/jni/voip/webrtc/{modules/audio_processing/include/config.cc => rtc_base/containers/flat_tree.cc} (59%) create mode 100644 TMessagesProj/jni/voip/webrtc/rtc_base/containers/flat_tree.h create mode 100644 TMessagesProj/jni/voip/webrtc/rtc_base/containers/identity.h create mode 100644 TMessagesProj/jni/voip/webrtc/rtc_base/containers/invoke.h create mode 100644 TMessagesProj/jni/voip/webrtc/rtc_base/containers/move_only_int.h create mode 100644 TMessagesProj/jni/voip/webrtc/rtc_base/containers/not_fn.h create mode 100644 TMessagesProj/jni/voip/webrtc/rtc_base/containers/void_t.h create mode 100644 TMessagesProj/jni/voip/webrtc/rtc_base/experiments/bandwidth_quality_scaler_settings.cc create mode 100644 TMessagesProj/jni/voip/webrtc/rtc_base/experiments/bandwidth_quality_scaler_settings.h delete mode 100644 TMessagesProj/jni/voip/webrtc/rtc_base/hash.h rename TMessagesProj/jni/voip/webrtc/{net/dcsctp/public => rtc_base}/strong_alias.h (82%) delete mode 100644 TMessagesProj/jni/voip/webrtc/rtc_base/synchronization/mutex_race_check.h create mode 100644 TMessagesProj/jni/voip/webrtc/rtc_base/win/create_direct3d_device.cc create mode 100644 TMessagesProj/jni/voip/webrtc/rtc_base/win/create_direct3d_device.h delete mode 100644 TMessagesProj/jni/voip/webrtc/rtc_base/win32_socket_server.cc delete mode 100644 TMessagesProj/jni/voip/webrtc/rtc_base/win32_socket_server.h create mode 100644 TMessagesProj/jni/voip/webrtc/sdk/android/generated_peerconnection_jni/IceCandidateErrorEvent_jni.h create mode 100644 TMessagesProj/jni/voip/webrtc/sdk/android/src/jni/audio_device/DEPS create mode 100644 TMessagesProj/jni/voip/webrtc/sdk/android/src/jni/dav1d_codec.cc rename TMessagesProj/jni/voip/webrtc/sdk/android/src/jni/{av1_codec.cc 
=> libaom_av1_codec.cc} (87%) create mode 100644 TMessagesProj/jni/voip/webrtc/system_wrappers/include/denormal_disabler.h create mode 100644 TMessagesProj/jni/voip/webrtc/system_wrappers/source/denormal_disabler.cc create mode 100644 TMessagesProj/jni/voip/webrtc/video/adaptation/bandwidth_quality_scaler_resource.cc create mode 100644 TMessagesProj/jni/voip/webrtc/video/adaptation/bandwidth_quality_scaler_resource.h create mode 100644 TMessagesProj/jni/voip/webrtc/video/decode_synchronizer.cc create mode 100644 TMessagesProj/jni/voip/webrtc/video/decode_synchronizer.h create mode 100644 TMessagesProj/jni/voip/webrtc/video/frame_buffer_proxy.cc create mode 100644 TMessagesProj/jni/voip/webrtc/video/frame_buffer_proxy.h create mode 100644 TMessagesProj/jni/voip/webrtc/video/frame_cadence_adapter.cc create mode 100644 TMessagesProj/jni/voip/webrtc/video/frame_cadence_adapter.h create mode 100644 TMessagesProj/jni/voip/webrtc/video/frame_decode_scheduler.h create mode 100644 TMessagesProj/jni/voip/webrtc/video/frame_decode_timing.cc create mode 100644 TMessagesProj/jni/voip/webrtc/video/frame_decode_timing.h create mode 100644 TMessagesProj/jni/voip/webrtc/video/task_queue_frame_decode_scheduler.cc create mode 100644 TMessagesProj/jni/voip/webrtc/video/task_queue_frame_decode_scheduler.h delete mode 100644 TMessagesProj/jni/voip/webrtc/video/video_receive_stream.cc delete mode 100644 TMessagesProj/jni/voip/webrtc/video/video_receive_stream.h create mode 100644 TMessagesProj/jni/voip/webrtc/video/video_receive_stream_timeout_tracker.cc create mode 100644 TMessagesProj/jni/voip/webrtc/video/video_receive_stream_timeout_tracker.h create mode 100644 TMessagesProj/src/main/java/org/telegram/messenger/CharacterCompat.java create mode 100644 TMessagesProj/src/main/java/org/telegram/messenger/FingerprintController.java create mode 100644 TMessagesProj/src/main/java/org/telegram/messenger/OneUIUtilities.java create mode 100644 TMessagesProj/src/main/java/org/telegram/ui/BasePermissionsActivity.java create mode 100644 TMessagesProj/src/main/java/org/telegram/ui/BlurSettingsBottomSheet.java delete mode 100644 TMessagesProj/src/main/java/org/telegram/ui/CancelAccountDeletionActivity.java create mode 100644 TMessagesProj/src/main/java/org/telegram/ui/Components/AnimatedPhoneNumberEditText.java rename TMessagesProj/src/main/java/org/telegram/ui/Components/{ChatBlurredFrameLayout.java => BlurredFrameLayout.java} (63%) create mode 100644 TMessagesProj/src/main/java/org/telegram/ui/Components/BlurredLinearLayout.java create mode 100644 TMessagesProj/src/main/java/org/telegram/ui/Components/BlurredRecyclerView.java create mode 100644 TMessagesProj/src/main/java/org/telegram/ui/Components/ChatAttachAlertPhotoLayoutPreview.java create mode 100644 TMessagesProj/src/main/java/org/telegram/ui/Components/ChevronView.java create mode 100644 TMessagesProj/src/main/java/org/telegram/ui/Components/CustomPhoneKeyboardView.java create mode 100644 TMessagesProj/src/main/java/org/telegram/ui/Components/OutlineTextContainerView.java create mode 100644 TMessagesProj/src/main/java/org/telegram/ui/Components/SearchDownloadsContainer.java create mode 100644 TMessagesProj/src/main/java/org/telegram/ui/Components/SenderSelectPopup.java create mode 100644 TMessagesProj/src/main/java/org/telegram/ui/Components/SimpleFloatPropertyCompat.java create mode 100644 TMessagesProj/src/main/java/org/telegram/ui/Components/TransformableLoginButtonView.java create mode 100644 
TMessagesProj/src/main/java/org/telegram/ui/Components/voip/RTMPStreamPipOverlay.java create mode 100644 TMessagesProj/src/main/java/org/telegram/ui/DownloadProgressIcon.java delete mode 100644 TMessagesProj/src/main/java/org/telegram/ui/MediaCalendarActivity.java create mode 100644 TMessagesProj/src/main/java/org/telegram/ui/SuggestClearDatabaseBottomSheet.java create mode 100644 TMessagesProj/src/main/res/anim/alpha_in.xml create mode 100644 TMessagesProj/src/main/res/anim/alpha_out.xml create mode 100644 TMessagesProj/src/main/res/anim/text_in.xml create mode 100644 TMessagesProj/src/main/res/anim/text_out.xml create mode 100644 TMessagesProj/src/main/res/anim/text_out_down.xml create mode 100644 TMessagesProj/src/main/res/drawable-hdpi/attach_arrow_left.png create mode 100644 TMessagesProj/src/main/res/drawable-hdpi/attach_arrow_right.png delete mode 100755 TMessagesProj/src/main/res/drawable-hdpi/intro_tg_sphere.png create mode 100644 TMessagesProj/src/main/res/drawable-hdpi/menu_clear.png create mode 100644 TMessagesProj/src/main/res/drawable-hdpi/msg_inputarrow.png create mode 100644 TMessagesProj/src/main/res/drawable-hdpi/msg_pin_code.png create mode 100644 TMessagesProj/src/main/res/drawable-hdpi/msg_voiceclose.png create mode 100644 TMessagesProj/src/main/res/drawable-hdpi/pip_video_close.png create mode 100644 TMessagesProj/src/main/res/drawable-hdpi/pip_video_expand.png create mode 100644 TMessagesProj/src/main/res/drawable-hdpi/voice_expand.png create mode 100644 TMessagesProj/src/main/res/drawable-hdpi/voice_minimize.png delete mode 100755 TMessagesProj/src/main/res/drawable-mdpi/intro_tg_sphere.png create mode 100644 TMessagesProj/src/main/res/drawable-mdpi/menu_clear.png create mode 100644 TMessagesProj/src/main/res/drawable-mdpi/msg_inputarrow.png create mode 100644 TMessagesProj/src/main/res/drawable-mdpi/msg_pin_code.png create mode 100644 TMessagesProj/src/main/res/drawable-mdpi/msg_voiceclose.png create mode 100644 TMessagesProj/src/main/res/drawable-mdpi/pip_video_close.png create mode 100644 TMessagesProj/src/main/res/drawable-mdpi/pip_video_expand.png create mode 100644 TMessagesProj/src/main/res/drawable-mdpi/voice_expand.png create mode 100644 TMessagesProj/src/main/res/drawable-mdpi/voice_minimize.png create mode 100644 TMessagesProj/src/main/res/drawable-xhdpi/attach_arrow_left.png create mode 100644 TMessagesProj/src/main/res/drawable-xhdpi/attach_arrow_right.png delete mode 100755 TMessagesProj/src/main/res/drawable-xhdpi/intro_tg_sphere.png create mode 100644 TMessagesProj/src/main/res/drawable-xhdpi/menu_clear.png create mode 100644 TMessagesProj/src/main/res/drawable-xhdpi/msg_inputarrow.png create mode 100644 TMessagesProj/src/main/res/drawable-xhdpi/msg_pin_code.png create mode 100644 TMessagesProj/src/main/res/drawable-xhdpi/msg_voiceclose.png create mode 100644 TMessagesProj/src/main/res/drawable-xhdpi/pip_video_close.png create mode 100644 TMessagesProj/src/main/res/drawable-xhdpi/pip_video_expand.png create mode 100644 TMessagesProj/src/main/res/drawable-xhdpi/voice_expand.png create mode 100644 TMessagesProj/src/main/res/drawable-xhdpi/voice_minimize.png create mode 100644 TMessagesProj/src/main/res/drawable-xxhdpi/attach_arrow_left.png create mode 100644 TMessagesProj/src/main/res/drawable-xxhdpi/attach_arrow_right.png delete mode 100755 TMessagesProj/src/main/res/drawable-xxhdpi/intro_tg_sphere.png create mode 100644 TMessagesProj/src/main/res/drawable-xxhdpi/menu_clear.png create mode 100644 
TMessagesProj/src/main/res/drawable-xxhdpi/msg_inputarrow.png create mode 100644 TMessagesProj/src/main/res/drawable-xxhdpi/msg_pin_code.png create mode 100644 TMessagesProj/src/main/res/drawable-xxhdpi/msg_voiceclose.png create mode 100644 TMessagesProj/src/main/res/drawable-xxhdpi/pip_video_close.png create mode 100644 TMessagesProj/src/main/res/drawable-xxhdpi/pip_video_expand.png create mode 100644 TMessagesProj/src/main/res/drawable-xxhdpi/voice_expand.png create mode 100644 TMessagesProj/src/main/res/drawable-xxhdpi/voice_minimize.png create mode 100644 TMessagesProj/src/main/res/raw/camera_wait.json create mode 100644 TMessagesProj/src/main/res/raw/code_laptop.json create mode 100644 TMessagesProj/src/main/res/raw/download_finish.json create mode 100644 TMessagesProj/src/main/res/raw/download_progress.json create mode 100644 TMessagesProj/src/main/res/raw/media_forbidden.json create mode 100644 TMessagesProj/src/main/res/raw/permission_request_apk.json create mode 100644 TMessagesProj/src/main/res/raw/permission_request_camera.json create mode 100644 TMessagesProj/src/main/res/raw/permission_request_contacts.json create mode 100644 TMessagesProj/src/main/res/raw/permission_request_folder.json create mode 100644 TMessagesProj/src/main/res/raw/permission_request_location.json create mode 100644 TMessagesProj/src/main/res/raw/permission_request_microphone.json create mode 100644 TMessagesProj/src/main/res/raw/phone_dots.json create mode 100644 TMessagesProj/src/main/res/raw/phone_dots_to_stars.json create mode 100644 TMessagesProj/src/main/res/raw/phone_flash_call.json create mode 100644 TMessagesProj/src/main/res/raw/phone_stars_to_dots.json create mode 100644 TMessagesProj/src/main/res/raw/sandclock.json create mode 100644 TMessagesProj/src/main/res/raw/utyan_newborn.json create mode 100644 TMessagesProj/src/main/res/raw/utyan_passcode.tgs diff --git a/TMessagesProj/build.gradle b/TMessagesProj/build.gradle index d46252bf9..e35332c27 100644 --- a/TMessagesProj/build.gradle +++ b/TMessagesProj/build.gradle @@ -300,7 +300,7 @@ android { } } - defaultConfig.versionCode = 2566 + defaultConfig.versionCode = 2587 applicationVariants.all { variant -> variant.outputs.all { output -> @@ -319,7 +319,7 @@ android { defaultConfig { minSdkVersion 16 targetSdkVersion 30 - versionName "8.5.4" + versionName "8.6.0" vectorDrawables.generatedDensities = ['mdpi', 'hdpi', 'xhdpi', 'xxhdpi'] diff --git a/TMessagesProj/jni/ffmpeg/include/libavcodec/get_bits.h b/TMessagesProj/jni/ffmpeg/include/libavcodec/get_bits.h index a0695d318..e7b3caa2e 100644 --- a/TMessagesProj/jni/ffmpeg/include/libavcodec/get_bits.h +++ b/TMessagesProj/jni/ffmpeg/include/libavcodec/get_bits.h @@ -314,7 +314,7 @@ static inline void skip_remaining(GetBitContext *s, unsigned n) */ static inline unsigned int get_bits(GetBitContext *s, int n) { - register unsigned int tmp; + unsigned int tmp; #if CACHED_BITSTREAM_READER av_assert2(n>0 && n<=32); diff --git a/TMessagesProj/jni/intro/IntroRenderer.c b/TMessagesProj/jni/intro/IntroRenderer.c index 9c20edc79..b195a1efd 100644 --- a/TMessagesProj/jni/intro/IntroRenderer.c +++ b/TMessagesProj/jni/intro/IntroRenderer.c @@ -54,7 +54,7 @@ static TexturedShape powerful_mask, powerful_infinity, powerful_infinity_white; static Shape private_bg; -static TexturedShape telegram_sphere, telegram_plane; +static TexturedShape telegram_sphere, telegram_plane, telegram_mask; static Shape cloud_bg; @@ -72,7 +72,7 @@ static mat4x4 ribbons_layer; static TexturedShape ic_bubble_dot, ic_bubble, ic_cam_lens, 
ic_cam, ic_pencil, ic_pin, ic_smile_eye, ic_smile, ic_videocam; static GLuint ic_bubble_dot_texture, ic_bubble_texture, ic_cam_lens_texture, ic_cam_texture, ic_pencil_texture, ic_pin_texture, ic_smile_eye_texture, ic_smile_texture, ic_videocam_texture; -static GLuint telegram_sphere_texture, telegram_plane_texture; +static GLuint telegram_sphere_texture, telegram_plane_texture, telegram_mask_texture; static GLuint fast_spiral_texture, fast_body_texture, fast_arrow_texture, fast_arrow_shadow_texture; static GLuint free_knot_up_texture, free_knot_down_texture; static GLuint powerful_mask_texture, powerful_star_texture, powerful_infinity_texture, powerful_infinity_white_texture; @@ -105,6 +105,7 @@ static float scroll_offset; static float calculated_speedometer_sin; float ms0_anim; int fps_anim; +int last_stars_update_fps; int count_anim_fps; static float speedometer_scroll_offset = 0, free_scroll_offset = 0, private_scroll_offset = 0; float anim_pencil_start_time, anim_pencil_start_all_time, anim_pencil_start_all_end_time; @@ -125,6 +126,8 @@ static int32_t anim_pencil_period; static mat4x4 private_matrix; float cloud_scroll_offset; +vec4 background_color = {1, 1, 1, 1}; + static inline void vec2_add(vec2 r, vec2 a, vec2 b) { int32_t i; for (i = 0; i < 2; ++i) { @@ -1122,6 +1125,8 @@ xyz star_initial_position(int32_t randZ, int32_t forward) { } void draw_stars() { + int update = last_stars_update_fps != fps_anim; + last_stars_update_fps = fps_anim; float k = (float) width / (float) height; set_y_offset_objects(-100 * k * 0); @@ -1137,7 +1142,9 @@ void draw_stars() { } float speed = stars_scroll_offset + transition_speed; - stars[i].position.z += speed; + if (update) { + stars[i].position.z += speed; + } if (stars[i].position.z > 0 && speed > 0) { stars[i].position = star_initial_position(0, 1); @@ -1682,8 +1689,24 @@ void draw_safe(int32_t type, float alpha, float screw_alpha) { draw_textured_shape(&private_screw, private_matrix, NORMAL_ONE); } -JNIEXPORT void Java_org_telegram_messenger_Intro_onDrawFrame(JNIEnv *env, jclass class) { - time_local += 0.016f; +JNIEXPORT void Java_org_telegram_messenger_Intro_setBackgroundColor(JNIEnv *env, jclass class, jfloat r, jfloat g, jfloat b, jfloat a) { + background_color[0] = r; + background_color[1] = g; + background_color[2] = b; + background_color[3] = a; + + cloud_cover = create_rectangle(CSizeMake(240, 100), background_color); + cloud_cover.params.anchor.y = -50; + + TexturedShape was_mask = powerful_mask; + powerful_mask = create_textured_rectangle(CSizeMake(200, 200), powerful_mask_texture); + powerful_mask.params = was_mask.params; + + telegram_mask = create_textured_rectangle(CSizeMake(200, 150), telegram_mask_texture); +} + +JNIEXPORT void Java_org_telegram_messenger_Intro_onDrawFrame(JNIEnv *env, jclass class, jint deltaMs) { + time_local += (float) deltaMs / 1000; if (current_page != prev_page) { reset_ic(); @@ -1722,7 +1745,8 @@ JNIEXPORT void Java_org_telegram_messenger_Intro_onDrawFrame(JNIEnv *env, jclass } } - fps_anim++; + // Normalize if FPS is greater than 60 + fps_anim = (int)(time_local / 0.016f); if (count_anim_fps == 1 && date - ms0_anim >= duration_const) { count_anim_fps = 0; } @@ -1735,7 +1759,7 @@ JNIEXPORT void Java_org_telegram_messenger_Intro_onDrawFrame(JNIEnv *env, jclass float private_back_k = .8; - glClearColor(1, 1, 1, 1); + glClearColor(background_color[0], background_color[1], background_color[2], background_color[3]); glClear(GL_COLOR_BUFFER_BIT); @@ -2140,15 +2164,16 @@ JNIEXPORT void 
Java_org_telegram_messenger_Intro_onDrawFrame(JNIEnv *env, jclass draw_textured_shape(&telegram_sphere, main_matrix, NORMAL); float tt = MINf(0, (float) (-M_PI * 125.0f / 180.0f + time * M_PI * 2 * 1.5f)); - float dx = sinf(tt) * 75; - float dy = -sinf(tt) * 60; + float dx = t(-75, 0, 0, 0.15f, EaseIn); + float dy = t(75, 0, 0, 0.15f, EaseIn); telegram_plane.params.position = xyzMake(dx, dy, 0); - float scale = (cosf(tt) + 1) * 0.5f; - telegram_plane.params.scale = xyzMake(cosf(tt) * scale, scale, 1); + float scale = t(0.1f, 1, 0.03f, 0.15f, EaseOut); + telegram_plane.params.scale = xyzMake(scale, scale, 1); if (tt < D2R(125)) { glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA); draw_textured_shape(&telegram_plane, main_matrix, NORMAL_ONE); + draw_textured_shape(&telegram_mask, main_matrix, NORMAL); } } } else if (current_page == 1) { @@ -2159,18 +2184,18 @@ JNIEXPORT void Java_org_telegram_messenger_Intro_onDrawFrame(JNIEnv *env, jclass double tt = time * M_PI * 2 * 1.5f; - float dx = (float) sin(tt) * 75; - float dy = (float) -sin(tt) * 60; + float dx = t(0, 75, 0, 0.15f, EaseOut); + float dy = t(0, -75, 0, 0.15f, EaseOut); telegram_plane.params.position = xyzMake(dx, dy, 0); - float scale = (float) (cos(tt) + 1) * 0.5f; - - telegram_plane.params.scale = xyzMake((float) cos(tt) * scale, scale, 1); + float scale = t(1, 0.1f, 0.03f, 0.15f, EaseOut); + telegram_plane.params.scale = xyzMake(scale, scale, 1); if (tt < D2R(125)) { glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA); draw_textured_shape(&telegram_plane, main_matrix, NORMAL_ONE); + draw_textured_shape(&telegram_mask, main_matrix, NORMAL); } } } else if (current_page == 2) { @@ -2601,9 +2626,10 @@ JNIEXPORT void Java_org_telegram_messenger_Intro_setIcTextures(JNIEnv *env, jcla ic_videocam_texture = a_ic_videocam; } -JNIEXPORT void Java_org_telegram_messenger_Intro_setTelegramTextures(JNIEnv *env, jclass class, GLuint a_telegram_sphere, GLuint a_telegram_plane) { +JNIEXPORT void Java_org_telegram_messenger_Intro_setTelegramTextures(JNIEnv *env, jclass class, GLuint a_telegram_sphere, GLuint a_telegram_plane, GLuint a_telegram_mask) { telegram_sphere_texture = a_telegram_sphere; telegram_plane_texture = a_telegram_plane; + telegram_mask_texture = a_telegram_mask; } JNIEXPORT void Java_org_telegram_messenger_Intro_setFastTextures(JNIEnv *env, jclass class, GLuint a_fast_body, GLuint a_fast_spiral, GLuint a_fast_arrow, GLuint a_fast_arrow_shadow) { @@ -2691,6 +2717,7 @@ JNIEXPORT void Java_org_telegram_messenger_Intro_onSurfaceCreated(JNIEnv *env, j mask1 = create_rounded_rectangle(CSizeMake(60, 60), 0, 16, black_color); telegram_sphere = create_textured_rectangle(CSizeMake(150, 150), telegram_sphere_texture); + telegram_mask = create_textured_rectangle(CSizeMake(200, 150), telegram_mask_texture); telegram_plane = create_textured_rectangle(CSizeMake(82, 74), telegram_plane_texture); telegram_plane.params.anchor = xyzMake(6, -5, 0); @@ -2794,7 +2821,7 @@ JNIEXPORT void Java_org_telegram_messenger_Intro_onSurfaceCreated(JNIEnv *env, j cloud_extra_mask3 = create_circle(1, cloud_polygons_count, black_color); cloud_extra_mask4 = create_circle(1, cloud_polygons_count, black_color); - cloud_cover = create_rectangle(CSizeMake(240, 100), white_color); + cloud_cover = create_rectangle(CSizeMake(240, 100), background_color); cloud_cover.params.anchor.y = -50; vec4 cloud_color = {42 / 255.0f, 180 / 255.0f, 247 / 255.0f, 1}; diff --git a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp.h b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp.h index 
984762465..f0dc35ad5 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp.h +++ b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp.h @@ -32,22 +32,20 @@ * THE POSSIBILITY OF SUCH DAMAGE. */ -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) #include -__FBSDID("$FreeBSD: head/sys/netinet/sctp.h 356357 2020-01-04 20:33:12Z tuexen $"); +__FBSDID("$FreeBSD: head/sys/netinet/sctp.h 366750 2020-10-16 10:44:48Z tuexen $"); #endif #ifndef _NETINET_SCTP_H_ #define _NETINET_SCTP_H_ -#if (defined(__APPLE__) || defined(__Userspace_os_Linux) || defined(__Userspace_os_Darwin)) +#if defined(__APPLE__) || defined(__linux__) #include #endif - #include - -#if !defined(__Userspace_os_Windows) +#if !defined(_WIN32) #define SCTP_PACKED __attribute__((packed)) #else #pragma pack (push, 1) @@ -191,7 +189,6 @@ struct sctp_paramhdr { #define SCTP_STREAM_RESET_INCOMING 0x00000001 #define SCTP_STREAM_RESET_OUTGOING 0x00000002 - /* here on down are more implementation specific */ #define SCTP_SET_DEBUG_LEVEL 0x00001005 #define SCTP_CLR_STAT_LOG 0x00001007 @@ -213,7 +210,6 @@ struct sctp_paramhdr { #define SCTP_PCB_STATUS 0x00001104 #define SCTP_GET_NONCE_VALUES 0x00001105 - /* Special hook for dynamically setting primary for all assoc's, * this is a write only option that requires root privilege. */ @@ -286,11 +282,11 @@ struct sctp_paramhdr { #define SCTP_PEELOFF 0x0000800a /* the real worker for sctp_getaddrlen() */ #define SCTP_GET_ADDR_LEN 0x0000800b -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) /* temporary workaround for Apple listen() issue, no args used */ #define SCTP_LISTEN_FIX 0x0000800c #endif -#if defined(__Windows__) +#if defined(_WIN32) && !defined(__Userspace__) /* workaround for Cygwin on Windows: returns the SOCKET handle */ #define SCTP_GET_HANDLE 0x0000800d #endif @@ -336,7 +332,6 @@ struct sctp_paramhdr { /* First-come, first-serve */ #define SCTP_SS_FIRST_COME 0x00000005 - /* fragment interleave constants * setting must be one of these or * EINVAL returned. @@ -607,13 +602,12 @@ struct sctp_error_auth_invalid_hmac { #define SCTP_MOBILITY_FASTHANDOFF 0x00000002 #define SCTP_MOBILITY_PRIM_DELETED 0x00000004 - /* Smallest PMTU allowed when disabling PMTU discovery */ #define SCTP_SMALLEST_PMTU 512 /* Largest PMTU allowed when disabling PMTU discovery */ #define SCTP_LARGEST_PMTU 65536 -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) #pragma pack(pop) #endif #undef SCTP_PACKED @@ -632,9 +626,9 @@ struct sctp_error_auth_invalid_hmac { */ #define SCTP_MAX_SACK_DELAY 500 /* per RFC4960 */ #define SCTP_MAX_HB_INTERVAL 14400000 /* 4 hours in ms */ +#define SCTP_MIN_COOKIE_LIFE 1000 /* 1 second in ms */ #define SCTP_MAX_COOKIE_LIFE 3600000 /* 1 hour in ms */ - /* Types of logging/KTR tracing that can be enabled via the * sysctl net.inet.sctp.sctp_logging. You must also enable * SUBSYS tracing. diff --git a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_asconf.c b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_asconf.c index 19f6fe615..50e29a401 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_asconf.c +++ b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_asconf.c @@ -32,9 +32,9 @@ * THE POSSIBILITY OF SUCH DAMAGE. 
*/ -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) #include -__FBSDID("$FreeBSD: head/sys/netinet/sctp_asconf.c 357197 2020-01-28 10:09:05Z tuexen $"); +__FBSDID("$FreeBSD$"); #endif #include @@ -53,10 +53,6 @@ __FBSDID("$FreeBSD: head/sys/netinet/sctp_asconf.c 357197 2020-01-28 10:09:05Z t * SCTP_DEBUG_ASCONF2: detailed info */ -#if defined(__APPLE__) -#define APPLE_FILE_NO 1 -#endif - /* * RFC 5061 * @@ -588,7 +584,6 @@ sctp_process_asconf_set_primary(struct sockaddr *src, SCTP_MOBILITY_PRIM_DELETED) && (stcb->asoc.primary_destination->dest_state & SCTP_ADDR_UNCONFIRMED) == 0) { - sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_ASCONF + SCTP_LOC_1); @@ -602,8 +597,7 @@ sctp_process_asconf_set_primary(struct sockaddr *src, sctp_move_chunks_from_net(stcb, stcb->asoc.deleted_primary); } - sctp_delete_prim_timer(stcb->sctp_ep, stcb, - stcb->asoc.deleted_primary); + sctp_delete_prim_timer(stcb->sctp_ep, stcb); } } else { /* couldn't set the requested primary address! */ @@ -743,7 +737,7 @@ sctp_handle_asconf(struct mbuf *m, unsigned int offset, sctp_m_freem(m_ack); return; } - if (param_length <= sizeof(struct sctp_paramhdr)) { + if (param_length < sizeof(struct sctp_asconf_paramhdr)) { SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: param length (%u) too short\n", param_length); sctp_m_freem(m_ack); return; @@ -955,12 +949,12 @@ sctp_addr_match(struct sctp_paramhdr *ph, struct sockaddr *sa) * Cleanup for non-responded/OP ERR'd ASCONF */ void -sctp_asconf_cleanup(struct sctp_tcb *stcb, struct sctp_nets *net) +sctp_asconf_cleanup(struct sctp_tcb *stcb) { /* * clear out any existing asconfs going out */ - sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, stcb->sctp_ep, stcb, net, + sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_ASCONF + SCTP_LOC_2); stcb->asoc.asconf_seq_out_acked = stcb->asoc.asconf_seq_out; /* remove the old ASCONF on our outbound queue */ @@ -997,8 +991,12 @@ sctp_asconf_nets_cleanup(struct sctp_tcb *stcb, struct sctp_ifn *ifn) ((ifn == NULL) || (SCTP_GET_IF_INDEX_FROM_ROUTE(&net->ro) != ifn->ifn_index))) { /* clear any cached route */ +#if defined(__FreeBSD__) && !defined(__Userspace__) + RO_NHFREE(&net->ro); +#else RTFREE(net->ro.ro_rt); net->ro.ro_rt = NULL; +#endif } /* clear any cached source address */ if (net->src_addr_selected) { @@ -1009,7 +1007,6 @@ sctp_asconf_nets_cleanup(struct sctp_tcb *stcb, struct sctp_ifn *ifn) } } - void sctp_assoc_immediate_retrans(struct sctp_tcb *stcb, struct sctp_nets *dstnet) { @@ -1050,9 +1047,14 @@ sctp_assoc_immediate_retrans(struct sctp_tcb *stcb, struct sctp_nets *dstnet) (stcb->asoc.sent_queue_cnt > 0)) { struct sctp_tmit_chunk *chk; - chk = TAILQ_FIRST(&stcb->asoc.sent_queue); - sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, - stcb, chk->whoTo); + TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { + if (chk->whoTo != NULL) { + break; + } + } + if (chk != NULL) { + sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo); + } } } return; @@ -1107,10 +1109,14 @@ sctp_path_check_and_react(struct sctp_tcb *stcb, struct sctp_ifa *newifa) if (addrnum == 1) { TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { /* clear any cached route and source address */ +#if defined(__FreeBSD__) && !defined(__Userspace__) + RO_NHFREE(&net->ro); +#else if (net->ro.ro_rt) { RTFREE(net->ro.ro_rt); net->ro.ro_rt = NULL; } +#endif if (net->src_addr_selected) { sctp_free_ifa(net->ro._s_addr); net->ro._s_addr = NULL; @@ -1129,10 +1135,14 @@ 
sctp_path_check_and_react(struct sctp_tcb *stcb, struct sctp_ifa *newifa) /* Multiple local addresses exsist in the association. */ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { /* clear any cached route and source address */ +#if defined(__FreeBSD__) && !defined(__Userspace__) + RO_NHFREE(&net->ro); +#else if (net->ro.ro_rt) { RTFREE(net->ro.ro_rt); net->ro.ro_rt = NULL; } +#endif if (net->src_addr_selected) { sctp_free_ifa(net->ro._s_addr); net->ro._s_addr = NULL; @@ -1147,7 +1157,11 @@ sctp_path_check_and_react(struct sctp_tcb *stcb, struct sctp_ifa *newifa) SCTP_RTALLOC((sctp_route_t *)&net->ro, stcb->sctp_ep->def_vrf_id, stcb->sctp_ep->fibnum); +#if defined(__FreeBSD__) && !defined(__Userspace__) + if (net->ro.ro_nh == NULL) +#else if (net->ro.ro_rt == NULL) +#endif continue; changed = 0; @@ -1187,7 +1201,7 @@ sctp_path_check_and_react(struct sctp_tcb *stcb, struct sctp_ifa *newifa) } } } -#endif /* __FreeBSD__ __APPLE__ __Userspace__ */ +#endif /* * process an ADD/DELETE IP ack from peer. @@ -1219,7 +1233,7 @@ sctp_asconf_addr_mgmt_ack(struct sctp_tcb *stcb, struct sctp_ifa *addr, uint32_t sctp_path_check_and_react(stcb, addr); return; } -#endif /* __FreeBSD__ __APPLE__ __Userspace__ */ +#endif /* clear any cached/topologically incorrect source addresses */ sctp_asconf_nets_cleanup(stcb, addr->ifn_p); } @@ -1353,7 +1367,6 @@ sctp_asconf_queue_mgmt(struct sctp_tcb *stcb, struct sctp_ifa *ifa, return (0); } - /* * add an asconf operation for the given ifa and type. * type = SCTP_ADD_IP_ADDRESS, SCTP_DEL_IP_ADDRESS, SCTP_SET_PRIM_ADDR. @@ -1723,10 +1736,9 @@ sctp_handle_asconf_ack(struct mbuf *m, int offset, char msg[SCTP_DIAG_INFO_LEN]; SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf_ack: got unexpected next serial number! Aborting asoc!\n"); - snprintf(msg, sizeof(msg), "Never sent serial number %8.8x", - serial_num); + SCTP_SNPRINTF(msg, sizeof(msg), "Never sent serial number %8.8x", serial_num); op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); - sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); + sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); *abort_no_unlock = 1; return; } @@ -1739,7 +1751,7 @@ sctp_handle_asconf_ack(struct mbuf *m, int offset, if (serial_num == asoc->asconf_seq_out - 1) { /* stop our timer */ - sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, stcb->sctp_ep, stcb, net, + sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_ASCONF + SCTP_LOC_5); } @@ -1765,7 +1777,7 @@ sctp_handle_asconf_ack(struct mbuf *m, int offset, sctp_asconf_ack_clear(stcb); return; } - if (param_length < sizeof(struct sctp_paramhdr)) { + if (param_length < sizeof(struct sctp_asconf_paramhdr)) { sctp_asconf_ack_clear(stcb); return; } @@ -1813,9 +1825,9 @@ sctp_handle_asconf_ack(struct mbuf *m, int offset, } /* switch */ /* update remaining ASCONF-ACK message length to process */ - ack_length -= SCTP_SIZE32(param_length); - if (ack_length <= 0) { - /* no more data in the mbuf chain */ + if (ack_length > SCTP_SIZE32(param_length)) { + ack_length -= SCTP_SIZE32(param_length); + } else { break; } offset += SCTP_SIZE32(param_length); @@ -1926,7 +1938,7 @@ sctp_addr_mgmt_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, switch (ifa->address.sa.sa_family) { #ifdef INET6 case AF_INET6: -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) if (prison_check_ip6(inp->ip_inp.inp.inp_cred, &ifa->address.sin6.sin6_addr) != 0) { return; @@ -1936,7 +1948,7 @@ 
sctp_addr_mgmt_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, #endif #ifdef INET case AF_INET: -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) if (prison_check_ip4(inp->ip_inp.inp.inp_cred, &ifa->address.sin.sin_addr) != 0) { return; @@ -2046,7 +2058,6 @@ sctp_addr_mgmt_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, } } - int sctp_asconf_iterator_ep(struct sctp_inpcb *inp, void *ptr, uint32_t val SCTP_UNUSED) { @@ -2110,7 +2121,6 @@ sctp_asconf_iterator_ep_end(struct sctp_inpcb *inp, void *ptr, uint32_t val SCTP laddr->action = 0; break; } - } } else if (l->action == SCTP_DEL_IP_ADDRESS) { LIST_FOREACH_SAFE(laddr, &inp->sctp_addr_list, sctp_nxt_addr, nladdr) { @@ -2165,7 +2175,7 @@ sctp_asconf_iterator_stcb(struct sctp_inpcb *inp, struct sctp_tcb *stcb, /* we skip unspecifed addresses */ continue; } -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) if (prison_check_ip6(inp->ip_inp.inp.inp_cred, &sin6->sin6_addr) != 0) { continue; @@ -2199,7 +2209,7 @@ sctp_asconf_iterator_stcb(struct sctp_inpcb *inp, struct sctp_tcb *stcb, /* we skip unspecifed addresses */ continue; } -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) if (prison_check_ip4(inp->ip_inp.inp.inp_cred, &sin->sin_addr) != 0) { continue; @@ -2227,7 +2237,6 @@ sctp_asconf_iterator_stcb(struct sctp_inpcb *inp, struct sctp_tcb *stcb, return; else continue; - break; } if (type == SCTP_ADD_IP_ADDRESS) { @@ -2236,18 +2245,19 @@ sctp_asconf_iterator_stcb(struct sctp_inpcb *inp, struct sctp_tcb *stcb, } else if (type == SCTP_DEL_IP_ADDRESS) { struct sctp_nets *net; TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { - sctp_rtentry_t *rt; - /* delete this address if cached */ if (net->ro._s_addr == ifa) { sctp_free_ifa(net->ro._s_addr); net->ro._s_addr = NULL; net->src_addr_selected = 0; - rt = net->ro.ro_rt; - if (rt) { - RTFREE(rt); +#if defined(__FreeBSD__) && !defined(__Userspace__) + RO_NHFREE(&net->ro); +#else + if (net->ro.ro_rt) { + RTFREE(net->ro.ro_rt); net->ro.ro_rt = NULL; } +#endif /* * Now we deleted our src address, * should we not also now reset the @@ -2256,7 +2266,6 @@ sctp_asconf_iterator_stcb(struct sctp_inpcb *inp, struct sctp_tcb *stcb, */ stcb->asoc.cc_functions.sctp_set_initial_cc_param(stcb, net); net->RTO = 0; - } } } else if (type == SCTP_SET_PRIM_ADDR) { @@ -2488,7 +2497,7 @@ sctp_find_valid_localaddr(struct sctp_tcb *stcb, int addr_locked) /* skip unspecifed addresses */ continue; } -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred, &sin->sin_addr) != 0) { continue; @@ -2522,7 +2531,7 @@ sctp_find_valid_localaddr(struct sctp_tcb *stcb, int addr_locked) /* we skip unspecifed addresses */ continue; } -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred, &sin6->sin6_addr) != 0) { continue; @@ -2611,14 +2620,14 @@ sctp_compose_asconf(struct sctp_tcb *stcb, int *retlen, int addr_locked) if (m_asconf_chk == NULL) { /* no mbuf's */ SCTPDBG(SCTP_DEBUG_ASCONF1, - "compose_asconf: couldn't get chunk mbuf!\n"); + "sctp_compose_asconf: couldn't get chunk mbuf!\n"); return (NULL); } m_asconf = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); if (m_asconf == NULL) { /* no mbuf's */ SCTPDBG(SCTP_DEBUG_ASCONF1, - "compose_asconf: couldn't get mbuf!\n"); + "sctp_compose_asconf: couldn't get mbuf!\n"); sctp_m_freem(m_asconf_chk); return (NULL); } @@ -2743,10 
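/* Illustrative sketch (not part of the patch): the bounds pattern the
 * ASCONF/ASCONF-ACK changes above apply when walking TLV parameters -
 * reject a parameter shorter than its full header and stop once the
 * padded length no longer fits in what remains, so the remaining-length
 * counter can never underflow. Types and names are hypothetical; byte
 * order handling is omitted for brevity. */
#include <stddef.h>
#include <stdint.h>

struct tlv { uint16_t type; uint16_t length; };    /* length covers this header */

static void walk_tlvs(const uint8_t *buf, size_t remaining) {
    while (remaining >= sizeof(struct tlv)) {
        const struct tlv *p = (const struct tlv *) buf;
        size_t len    = p->length;
        size_t padded = (len + 3u) & ~(size_t) 3u; /* 32-bit padding, like SCTP_SIZE32() */
        if (len < sizeof(struct tlv) || padded > remaining) {
            break;                                 /* malformed or would overrun */
        }
        /* ... process the parameter at p ... */
        buf       += padded;
        remaining -= padded;
    }
}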
+2752,12 @@ sctp_compose_asconf(struct sctp_tcb *stcb, int *retlen, int addr_locked) break; #endif default: - p_size = 0; - addr_size = 0; - addr_ptr = NULL; - break; + SCTPDBG(SCTP_DEBUG_ASCONF1, + "sctp_compose_asconf: no usable lookup addr (family = %d)!\n", + found_addr->sa_family); + sctp_m_freem(m_asconf_chk); + sctp_m_freem(m_asconf); + return (NULL); } lookup->ph.param_length = htons(SCTP_SIZE32(p_size)); memcpy(lookup->addr, addr_ptr, addr_size); @@ -2754,12 +2765,10 @@ sctp_compose_asconf(struct sctp_tcb *stcb, int *retlen, int addr_locked) } else { /* uh oh... don't have any address?? */ SCTPDBG(SCTP_DEBUG_ASCONF1, - "compose_asconf: no lookup addr!\n"); - /* XXX for now, we send a IPv4 address of 0.0.0.0 */ - lookup->ph.param_type = htons(SCTP_IPV4_ADDRESS); - lookup->ph.param_length = htons(SCTP_SIZE32(sizeof(struct sctp_ipv4addr_param))); - memset(lookup->addr, 0, sizeof(struct in_addr)); - SCTP_BUF_LEN(m_asconf_chk) += SCTP_SIZE32(sizeof(struct sctp_ipv4addr_param)); + "sctp_compose_asconf: no lookup addr!\n"); + sctp_m_freem(m_asconf_chk); + sctp_m_freem(m_asconf); + return (NULL); } } /* chain it all together */ @@ -3055,10 +3064,6 @@ sctp_check_address_list_ep(struct sctp_tcb *stcb, struct mbuf *m, int offset, "check_addr_list_ep: laddr->ifa is NULL"); continue; } - if (laddr->ifa == NULL) { - SCTPDBG(SCTP_DEBUG_ASCONF1, "check_addr_list_ep: laddr->ifa->ifa_addr is NULL"); - continue; - } /* do i have it implicitly? */ if (sctp_cmpaddr(&laddr->ifa->address.sa, init_addr)) { continue; @@ -3120,7 +3125,7 @@ sctp_check_address_list_all(struct sctp_tcb *stcb, struct mbuf *m, int offset, #ifdef INET case AF_INET: sin = &sctp_ifa->address.sin; -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred, &sin->sin_addr) != 0) { continue; @@ -3136,7 +3141,7 @@ sctp_check_address_list_all(struct sctp_tcb *stcb, struct mbuf *m, int offset, #ifdef INET6 case AF_INET6: sin6 = &sctp_ifa->address.sin6; -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred, &sin6->sin6_addr) != 0) { continue; @@ -3205,7 +3210,7 @@ sctp_check_address_list(struct sctp_tcb *stcb, struct mbuf *m, int offset, */ uint32_t sctp_addr_mgmt_ep_sa(struct sctp_inpcb *inp, struct sockaddr *sa, - uint32_t type, uint32_t vrf_id, struct sctp_ifa *sctp_ifap) + uint32_t type, uint32_t vrf_id) { struct sctp_ifa *ifa; struct sctp_laddr *laddr, *nladdr; @@ -3216,9 +3221,7 @@ sctp_addr_mgmt_ep_sa(struct sctp_inpcb *inp, struct sockaddr *sa, return (EINVAL); } #endif - if (sctp_ifap) { - ifa = sctp_ifap; - } else if (type == SCTP_ADD_IP_ADDRESS) { + if (type == SCTP_ADD_IP_ADDRESS) { /* For an add the address MUST be on the system */ ifa = sctp_find_ifa_by_addr(sa, vrf_id, SCTP_ADDR_NOT_LOCKED); } else if (type == SCTP_DEL_IP_ADDRESS) { @@ -3305,10 +3308,9 @@ sctp_addr_mgmt_ep_sa(struct sctp_inpcb *inp, struct sockaddr *sa, } void -sctp_asconf_send_nat_state_update(struct sctp_tcb *stcb, - struct sctp_nets *net) +sctp_asconf_send_nat_state_update(struct sctp_tcb *stcb, struct sctp_nets *net) { - struct sctp_asconf_addr *aa; + struct sctp_asconf_addr *aa_vtag, *aa_add, *aa_del; struct sctp_ifa *sctp_ifap; struct sctp_asconf_tag_param *vtag; #ifdef INET @@ -3317,6 +3319,7 @@ sctp_asconf_send_nat_state_update(struct sctp_tcb *stcb, #ifdef INET6 struct sockaddr_in6 *to6; #endif + if (net == NULL) { SCTPDBG(SCTP_DEBUG_ASCONF1, "sctp_asconf_send_nat_state_update: Missing 
net\n"); return; @@ -3325,108 +3328,84 @@ sctp_asconf_send_nat_state_update(struct sctp_tcb *stcb, SCTPDBG(SCTP_DEBUG_ASCONF1, "sctp_asconf_send_nat_state_update: Missing stcb\n"); return; } - /* Need to have in the asconf: - * - vtagparam(my_vtag/peer_vtag) - * - add(0.0.0.0) - * - del(0.0.0.0) - * - Any global addresses add(addr) - */ - SCTP_MALLOC(aa, struct sctp_asconf_addr *, sizeof(*aa), - SCTP_M_ASC_ADDR); - if (aa == NULL) { - /* didn't get memory */ - SCTPDBG(SCTP_DEBUG_ASCONF1, - "sctp_asconf_send_nat_state_update: failed to get memory!\n"); + /* Need to have in the ASCONF: + * - VTAG(my_vtag/peer_vtag) + * - ADD(wildcard) + * - DEL(wildcard) + * - ADD(Any global addresses) + */ + SCTP_MALLOC(aa_vtag, struct sctp_asconf_addr *, sizeof(struct sctp_asconf_addr), SCTP_M_ASC_ADDR); + SCTP_MALLOC(aa_add, struct sctp_asconf_addr *, sizeof(struct sctp_asconf_addr), SCTP_M_ASC_ADDR); + SCTP_MALLOC(aa_del, struct sctp_asconf_addr *, sizeof(struct sctp_asconf_addr), SCTP_M_ASC_ADDR); + + if ((aa_vtag == NULL) || (aa_add == NULL) || (aa_del == NULL)) { + /* Didn't get memory */ + SCTPDBG(SCTP_DEBUG_ASCONF1, "sctp_asconf_send_nat_state_update: failed to get memory!\n"); +out: + if (aa_vtag != NULL) { + SCTP_FREE(aa_vtag, SCTP_M_ASC_ADDR); + } + if (aa_add != NULL) { + SCTP_FREE(aa_add, SCTP_M_ASC_ADDR); + } + if (aa_del != NULL) { + SCTP_FREE(aa_del, SCTP_M_ASC_ADDR); + } return; } - aa->special_del = 0; - /* fill in asconf address parameter fields */ - /* top level elements are "networked" during send */ - aa->ifa = NULL; - aa->sent = 0; /* clear sent flag */ - vtag = (struct sctp_asconf_tag_param *)&aa->ap.aph; + memset(aa_vtag, 0, sizeof(struct sctp_asconf_addr)); + aa_vtag->special_del = 0; + /* Fill in ASCONF address parameter fields. */ + /* Top level elements are "networked" during send. 
*/ + aa_vtag->ifa = NULL; + aa_vtag->sent = 0; /* clear sent flag */ + vtag = (struct sctp_asconf_tag_param *)&aa_vtag->ap.aph; vtag->aph.ph.param_type = SCTP_NAT_VTAGS; vtag->aph.ph.param_length = sizeof(struct sctp_asconf_tag_param); vtag->local_vtag = htonl(stcb->asoc.my_vtag); vtag->remote_vtag = htonl(stcb->asoc.peer_vtag); - TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next); - SCTP_MALLOC(aa, struct sctp_asconf_addr *, sizeof(*aa), - SCTP_M_ASC_ADDR); - if (aa == NULL) { - /* didn't get memory */ - SCTPDBG(SCTP_DEBUG_ASCONF1, - "sctp_asconf_send_nat_state_update: failed to get memory!\n"); - return; - } - memset(aa, 0, sizeof(struct sctp_asconf_addr)); - /* fill in asconf address parameter fields */ - /* ADD(0.0.0.0) */ + memset(aa_add, 0, sizeof(struct sctp_asconf_addr)); + memset(aa_del, 0, sizeof(struct sctp_asconf_addr)); switch (net->ro._l_addr.sa.sa_family) { #ifdef INET case AF_INET: - aa->ap.aph.ph.param_type = SCTP_ADD_IP_ADDRESS; - aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_addrv4_param); - aa->ap.addrp.ph.param_type = SCTP_IPV4_ADDRESS; - aa->ap.addrp.ph.param_length = sizeof (struct sctp_ipv4addr_param); - /* No need to add an address, we are using 0.0.0.0 */ - TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next); + aa_add->ap.aph.ph.param_type = SCTP_ADD_IP_ADDRESS; + aa_add->ap.aph.ph.param_length = sizeof(struct sctp_asconf_addrv4_param); + aa_add->ap.addrp.ph.param_type = SCTP_IPV4_ADDRESS; + aa_add->ap.addrp.ph.param_length = sizeof (struct sctp_ipv4addr_param); + /* No need to fill the address, we are using 0.0.0.0 */ + aa_del->ap.aph.ph.param_type = SCTP_ADD_IP_ADDRESS; + aa_del->ap.aph.ph.param_length = sizeof(struct sctp_asconf_addrv4_param); + aa_del->ap.addrp.ph.param_type = SCTP_IPV4_ADDRESS; + aa_del->ap.addrp.ph.param_length = sizeof (struct sctp_ipv4addr_param); + /* No need to fill the address, we are using 0.0.0.0 */ break; #endif #ifdef INET6 case AF_INET6: - aa->ap.aph.ph.param_type = SCTP_ADD_IP_ADDRESS; - aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_addr_param); - aa->ap.addrp.ph.param_type = SCTP_IPV6_ADDRESS; - aa->ap.addrp.ph.param_length = sizeof (struct sctp_ipv6addr_param); - /* No need to add an address, we are using 0.0.0.0 */ - TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next); + aa_add->ap.aph.ph.param_type = SCTP_ADD_IP_ADDRESS; + aa_add->ap.aph.ph.param_length = sizeof(struct sctp_asconf_addr_param); + aa_add->ap.addrp.ph.param_type = SCTP_IPV6_ADDRESS; + aa_add->ap.addrp.ph.param_length = sizeof (struct sctp_ipv6addr_param); + /* No need to fill the address, we are using ::0 */ + aa_del->ap.aph.ph.param_type = SCTP_DEL_IP_ADDRESS; + aa_del->ap.aph.ph.param_length = sizeof(struct sctp_asconf_addr_param); + aa_del->ap.addrp.ph.param_type = SCTP_IPV6_ADDRESS; + aa_del->ap.addrp.ph.param_length = sizeof (struct sctp_ipv6addr_param); + /* No need to fill the address, we are using ::0 */ break; #endif default: SCTPDBG(SCTP_DEBUG_ASCONF1, - "sctp_asconf_send_nat_state_update: unknown address family\n"); - SCTP_FREE(aa, SCTP_M_ASC_ADDR); - return; - } - SCTP_MALLOC(aa, struct sctp_asconf_addr *, sizeof(*aa), - SCTP_M_ASC_ADDR); - if (aa == NULL) { - /* didn't get memory */ - SCTPDBG(SCTP_DEBUG_ASCONF1, - "sctp_asconf_send_nat_state_update: failed to get memory!\n"); - return; - } - memset(aa, 0, sizeof(struct sctp_asconf_addr)); - /* fill in asconf address parameter fields */ - /* ADD(0.0.0.0) */ - switch (net->ro._l_addr.sa.sa_family) { -#ifdef INET - case AF_INET: - aa->ap.aph.ph.param_type = SCTP_ADD_IP_ADDRESS; 
- aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_addrv4_param); - aa->ap.addrp.ph.param_type = SCTP_IPV4_ADDRESS; - aa->ap.addrp.ph.param_length = sizeof (struct sctp_ipv4addr_param); - /* No need to add an address, we are using 0.0.0.0 */ - TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next); - break; -#endif -#ifdef INET6 - case AF_INET6: - aa->ap.aph.ph.param_type = SCTP_DEL_IP_ADDRESS; - aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_addr_param); - aa->ap.addrp.ph.param_type = SCTP_IPV6_ADDRESS; - aa->ap.addrp.ph.param_length = sizeof (struct sctp_ipv6addr_param); - /* No need to add an address, we are using 0.0.0.0 */ - TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next); - break; -#endif - default: - SCTPDBG(SCTP_DEBUG_ASCONF1, - "sctp_asconf_send_nat_state_update: unknown address family\n"); - SCTP_FREE(aa, SCTP_M_ASC_ADDR); - return; + "sctp_asconf_send_nat_state_update: unknown address family %d\n", + net->ro._l_addr.sa.sa_family); + goto out; } + TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa_vtag, next); + TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa_add, next); + TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa_del, next); + /* Now we must hunt the addresses and add all global addresses */ if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { struct sctp_vrf *vrf = NULL; @@ -3446,7 +3425,7 @@ sctp_asconf_send_nat_state_update(struct sctp_tcb *stcb, #ifdef INET case AF_INET: to = &sctp_ifap->address.sin; -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred, &to->sin_addr) != 0) { continue; @@ -3463,7 +3442,7 @@ sctp_asconf_send_nat_state_update(struct sctp_tcb *stcb, #ifdef INET6 case AF_INET6: to6 = &sctp_ifap->address.sin6; -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred, &to6->sin6_addr) != 0) { continue; diff --git a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_asconf.h b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_asconf.h index 28130f3b2..386894920 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_asconf.h +++ b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_asconf.h @@ -32,9 +32,9 @@ * THE POSSIBILITY OF SUCH DAMAGE. 
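/* Illustrative sketch (not part of the patch): the allocate-everything-
 * up-front shape of the reworked sctp_asconf_send_nat_state_update() -
 * obtain all three entries first and, if any allocation fails, release
 * whichever ones were obtained through a single cleanup path. Names below
 * are hypothetical. */
#include <stdlib.h>

struct entry { int value; };

static int make_three(struct entry **a, struct entry **b, struct entry **c) {
    *a = calloc(1, sizeof(**a));
    *b = calloc(1, sizeof(**b));
    *c = calloc(1, sizeof(**c));
    if (*a == NULL || *b == NULL || *c == NULL) {
        free(*a);                 /* free(NULL) is a no-op */
        free(*b);
        free(*c);
        *a = *b = *c = NULL;
        return -1;
    }
    return 0;                     /* caller now owns all three entries */
}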
*/ -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) #include -__FBSDID("$FreeBSD: head/sys/netinet/sctp_asconf.h 324056 2017-09-27 13:05:23Z tuexen $"); +__FBSDID("$FreeBSD: head/sys/netinet/sctp_asconf.h 365071 2020-09-01 21:19:14Z mjg $"); #endif #ifndef _NETINET_SCTP_ASCONF_H_ @@ -45,7 +45,7 @@ __FBSDID("$FreeBSD: head/sys/netinet/sctp_asconf.h 324056 2017-09-27 13:05:23Z t /* * function prototypes */ -extern void sctp_asconf_cleanup(struct sctp_tcb *, struct sctp_nets *); +extern void sctp_asconf_cleanup(struct sctp_tcb *); extern struct mbuf *sctp_compose_asconf(struct sctp_tcb *, int *, int); @@ -58,9 +58,8 @@ sctp_handle_asconf_ack(struct mbuf *, int, struct sctp_asconf_ack_chunk *, struct sctp_tcb *, struct sctp_nets *, int *); extern uint32_t -sctp_addr_mgmt_ep_sa(struct sctp_inpcb *, struct sockaddr *, - uint32_t, uint32_t, struct sctp_ifa *); - +sctp_addr_mgmt_ep_sa(struct sctp_inpcb *, struct sockaddr *, uint32_t, + uint32_t); extern int sctp_asconf_iterator_ep(struct sctp_inpcb *inp, void *ptr, uint32_t val); @@ -69,7 +68,6 @@ extern void sctp_asconf_iterator_stcb(struct sctp_inpcb *inp, void *ptr, uint32_t type); extern void sctp_asconf_iterator_end(void *ptr, uint32_t val); - extern int32_t sctp_set_primary_ip_address_sa(struct sctp_tcb *, struct sockaddr *); diff --git a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_auth.c b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_auth.c index 65571df2d..9281e0780 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_auth.c +++ b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_auth.c @@ -32,9 +32,9 @@ * THE POSSIBILITY OF SUCH DAMAGE. */ -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) #include -__FBSDID("$FreeBSD: head/sys/netinet/sctp_auth.c 355931 2019-12-20 15:25:08Z tuexen $"); +__FBSDID("$FreeBSD: head/sys/netinet/sctp_auth.c 365071 2020-09-01 21:19:14Z mjg $"); #endif #include @@ -53,7 +53,6 @@ __FBSDID("$FreeBSD: head/sys/netinet/sctp_auth.c 355931 2019-12-20 15:25:08Z tue #define SCTP_AUTH_DEBUG2 (SCTP_BASE_SYSCTL(sctp_debug_on) & SCTP_DEBUG_AUTH2) #endif /* SCTP_DEBUG */ - void sctp_clear_chunklist(sctp_auth_chklist_t *chklist) { @@ -101,7 +100,6 @@ sctp_copy_chunklist(sctp_auth_chklist_t *list) return (new_list); } - /* * add a chunk to the required chunks list */ @@ -241,7 +239,6 @@ sctp_unpack_auth_chunks(const uint8_t *ptr, uint8_t num_chunks, return (size); } - /* * allocate structure space for a key of length keylen */ @@ -458,7 +455,6 @@ sctp_compute_hashkey(sctp_key_t *key1, sctp_key_t *key2, sctp_key_t *shared) return (new_key); } - sctp_sharedkey_t * sctp_alloc_sharedkey(void) { @@ -567,11 +563,7 @@ sctp_auth_key_acquire(struct sctp_tcb *stcb, uint16_t key_id) } void -sctp_auth_key_release(struct sctp_tcb *stcb, uint16_t key_id, int so_locked -#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) - SCTP_UNUSED -#endif -) +sctp_auth_key_release(struct sctp_tcb *stcb, uint16_t key_id, int so_locked) { sctp_sharedkey_t *skey; @@ -636,7 +628,6 @@ sctp_copy_skeylist(const struct sctp_keyhead *src, struct sctp_keyhead *dest) return (count); } - sctp_hmaclist_t * sctp_alloc_hmaclist(uint16_t num_hmacs) { @@ -660,7 +651,6 @@ sctp_free_hmaclist(sctp_hmaclist_t *list) { if (list != NULL) { SCTP_FREE(list,SCTP_M_AUTH_HL); - list = NULL; } } @@ -831,7 +821,6 @@ sctp_free_authinfo(sctp_authinfo_t *authinfo) /* SCTP_FREE(authinfo, SCTP_M_AUTH_??); */ } - uint32_t sctp_get_auth_chunk_len(uint16_t hmac_algo) { @@ -1171,7 +1160,6 @@ 
sctp_auth_is_supported_hmac(sctp_hmaclist_t *list, uint16_t id) return (0); } - /*- * clear any cached key(s) if they match the given key id on an association. * the cached key(s) will be recomputed and re-cached at next use. @@ -1584,7 +1572,6 @@ sctp_fill_hmac_digest_m(struct mbuf *m, uint32_t auth_offset, m, auth_offset, auth->hmac); } - static void sctp_zero_m(struct mbuf *m, uint32_t m_offset, uint32_t size) { @@ -1650,6 +1637,9 @@ sctp_handle_auth(struct sctp_tcb *stcb, struct sctp_auth_chunk *auth, "SCTP AUTH Chunk: shared key %u, HMAC id %u\n", shared_key_id, hmac_id); +#if defined(__Userspace__) && defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) + return (0); +#endif /* is the indicated HMAC supported? */ if (!sctp_auth_is_supported_hmac(stcb->asoc.local_hmacs, hmac_id)) { struct mbuf *op_err; @@ -1730,11 +1720,6 @@ sctp_handle_auth(struct sctp_tcb *stcb, struct sctp_auth_chunk *auth, (void)sctp_compute_hmac_m(hmac_id, stcb->asoc.authinfo.recv_key, m, offset, computed_digest); -#if defined(__Userspace__) -#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION - return (0); -#endif -#endif /* compare the computed digest with the one in the AUTH chunk */ if (timingsafe_bcmp(digest, computed_digest, digestlen) != 0) { SCTP_STAT_INCR(sctps_recvauthfailed); @@ -1750,11 +1735,7 @@ sctp_handle_auth(struct sctp_tcb *stcb, struct sctp_auth_chunk *auth, */ void sctp_notify_authentication(struct sctp_tcb *stcb, uint32_t indication, - uint16_t keyid, uint16_t alt_keyid, int so_locked -#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) - SCTP_UNUSED -#endif -) + uint16_t keyid, uint16_t alt_keyid, int so_locked) { struct mbuf *m_notify; struct sctp_authkey_event *auth; @@ -1809,7 +1790,6 @@ sctp_notify_authentication(struct sctp_tcb *stcb, uint32_t indication, &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); } - /*- * validates the AUTHentication related parameters in an INIT/INIT-ACK * Note: currently only used for INIT as INIT-ACK is handled inline @@ -1924,7 +1904,6 @@ sctp_validate_init_auth_params(struct mbuf *m, int offset, int limit) saw_asconf = 1; if (chunks->chunk_types[i] == SCTP_ASCONF_ACK) saw_asconf_ack = 1; - } if (num_chunks) got_chklist = 1; diff --git a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_auth.h b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_auth.h index 9005ccb23..d43ada90d 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_auth.h +++ b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_auth.h @@ -32,9 +32,9 @@ * THE POSSIBILITY OF SUCH DAMAGE. 
*/ -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) #include -__FBSDID("$FreeBSD: head/sys/netinet/sctp_auth.h 338749 2018-09-18 10:53:07Z tuexen $"); +__FBSDID("$FreeBSD: head/sys/netinet/sctp_auth.h 365071 2020-09-01 21:19:14Z mjg $"); #endif #ifndef _NETINET_SCTP_AUTH_H_ @@ -99,8 +99,6 @@ typedef struct sctp_authinformation { uint16_t recv_keyid; /* last recv keyid (cached) */ } sctp_authinfo_t; - - /* * Macros */ @@ -149,7 +147,6 @@ extern void sctp_auth_key_acquire(struct sctp_tcb *stcb, uint16_t keyid); extern void sctp_auth_key_release(struct sctp_tcb *stcb, uint16_t keyid, int so_locked); - /* hmac list handling */ extern sctp_hmaclist_t *sctp_alloc_hmaclist(uint16_t num_hmacs); extern void sctp_free_hmaclist(sctp_hmaclist_t *list); diff --git a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_bsd_addr.c b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_bsd_addr.c index 8547f447c..83201712a 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_bsd_addr.c +++ b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_bsd_addr.c @@ -32,9 +32,9 @@ * THE POSSIBILITY OF SUCH DAMAGE. */ -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) #include -__FBSDID("$FreeBSD: head/sys/netinet/sctp_bsd_addr.c 353480 2019-10-13 18:17:08Z tuexen $"); +__FBSDID("$FreeBSD: head/sys/netinet/sctp_bsd_addr.c 366426 2020-10-04 15:37:34Z tuexen $"); #endif #include @@ -50,12 +50,11 @@ __FBSDID("$FreeBSD: head/sys/netinet/sctp_bsd_addr.c 353480 2019-10-13 18:17:08Z #include #include #include -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) #include #endif /* Declare all of our malloc named types */ -#ifndef __Panda__ MALLOC_DEFINE(SCTP_M_MAP, "sctp_map", "sctp asoc map descriptor"); MALLOC_DEFINE(SCTP_M_STRMI, "sctp_stri", "sctp stream in array"); MALLOC_DEFINE(SCTP_M_STRMO, "sctp_stro", "sctp stream out array"); @@ -76,12 +75,11 @@ MALLOC_DEFINE(SCTP_M_MVRF, "sctp_mvrf", "sctp mvrf pcb list"); MALLOC_DEFINE(SCTP_M_ITER, "sctp_iter", "sctp iterator control"); MALLOC_DEFINE(SCTP_M_SOCKOPT, "sctp_socko", "sctp socket option"); MALLOC_DEFINE(SCTP_M_MCORE, "sctp_mcore", "sctp mcore queue"); -#endif /* Global NON-VNET structure that controls the iterator */ struct iterator_control sctp_it_ctl; +#if !(defined(__FreeBSD__) && !defined(__Userspace__)) -#if !defined(__FreeBSD__) static void sctp_cleanup_itqueue(void) { @@ -109,7 +107,7 @@ void sctp_wakeup_iterator(void) { #if defined(SCTP_PROCESS_LEVEL_LOCKS) -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) WakeAllConditionVariable(&sctp_it_ctl.iterator_wakeup); #else pthread_cond_broadcast(&sctp_it_ctl.iterator_wakeup); @@ -131,7 +129,7 @@ sctp_iterator_thread(void *v SCTP_UNUSED) #endif SCTP_IPI_ITERATOR_WQ_LOCK(); /* In FreeBSD this thread never terminates. 
*/ -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) for (;;) { #else while ((sctp_it_ctl.iterator_flags & SCTP_ITERATOR_MUST_EXIT) == 0) { @@ -140,25 +138,25 @@ sctp_iterator_thread(void *v SCTP_UNUSED) msleep(&sctp_it_ctl.iterator_running, #if defined(__FreeBSD__) &sctp_it_ctl.ipi_iterator_wq_mtx, -#elif defined(__APPLE__) || defined(__Userspace_os_Darwin) +#elif defined(__APPLE__) sctp_it_ctl.ipi_iterator_wq_mtx, #endif 0, "waiting_for_work", 0); #else -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) SleepConditionVariableCS(&sctp_it_ctl.iterator_wakeup, &sctp_it_ctl.ipi_iterator_wq_mtx, INFINITE); #else pthread_cond_wait(&sctp_it_ctl.iterator_wakeup, &sctp_it_ctl.ipi_iterator_wq_mtx); #endif #endif -#if !defined(__FreeBSD__) +#if !(defined(__FreeBSD__) && !defined(__Userspace__)) if (sctp_it_ctl.iterator_flags & SCTP_ITERATOR_MUST_EXIT) { break; } #endif sctp_iterator_worker(); } -#if !defined(__FreeBSD__) +#if !(defined(__FreeBSD__) && !defined(__Userspace__)) /* Now this thread needs to be terminated */ sctp_cleanup_itqueue(); sctp_it_ctl.iterator_flags |= SCTP_ITERATOR_EXITED; @@ -187,23 +185,21 @@ sctp_startup_iterator(void) SCTP_ITERATOR_LOCK_INIT(); SCTP_IPI_ITERATOR_WQ_INIT(); TAILQ_INIT(&sctp_it_ctl.iteratorhead); -#if defined(__FreeBSD__) -#if __FreeBSD_version <= 701000 - kthread_create(sctp_iterator_thread, -#else +#if defined(__Userspace__) + if (sctp_userspace_thread_create(&sctp_it_ctl.thread_proc, &sctp_iterator_thread)) { + SCTP_PRINTF("ERROR: Creating sctp_iterator_thread failed.\n"); + } else { + SCTP_BASE_VAR(iterator_thread_started) = 1; + } +#elif defined(__FreeBSD__) kproc_create(sctp_iterator_thread, -#endif (void *)NULL, &sctp_it_ctl.thread_proc, - RFPROC, + 0, SCTP_KTHREAD_PAGES, SCTP_KTRHEAD_NAME); #elif defined(__APPLE__) kernel_thread_start((thread_continue_t)sctp_iterator_thread, NULL, &sctp_it_ctl.thread_proc); -#elif defined(__Userspace__) - if (sctp_userspace_thread_create(&sctp_it_ctl.thread_proc, &sctp_iterator_thread)) { - SCTP_PRINTF("ERROR: Creating sctp_iterator_thread failed.\n"); - } #endif } @@ -248,7 +244,6 @@ sctp_gather_internal_ifa_flags(struct sctp_ifa *ifa) #endif /* __Userspace__ */ #endif /* INET6 */ - #if !defined(__Userspace__) static uint32_t sctp_is_desired_interface_type(struct ifnet *ifn) @@ -256,7 +251,7 @@ sctp_is_desired_interface_type(struct ifnet *ifn) int result; /* check the interface type to see if it's one we care about */ -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) switch(ifnet_type(ifn)) { #else switch (ifn->if_type) { @@ -282,7 +277,7 @@ sctp_is_desired_interface_type(struct ifnet *ifn) case IFT_GIF: case IFT_L2VLAN: case IFT_STF: -#if !defined(__APPLE__) +#if !(defined(__APPLE__) && !defined(__Userspace__)) case IFT_IP: case IFT_IPOVERCDLC: case IFT_IPOVERCLAW: @@ -298,16 +293,17 @@ sctp_is_desired_interface_type(struct ifnet *ifn) return (result); } #endif +#if defined(__APPLE__) && !defined(__Userspace__) -#if defined(__APPLE__) int sctp_is_vmware_interface(struct ifnet *ifn) { return (strncmp(ifnet_name(ifn), "vmnet", 5) == 0); } + #endif -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) && defined(__Userspace__) #ifdef MALLOC #undef MALLOC #define MALLOC(x) HeapAlloc(GetProcessHeap(), 0, (x)) @@ -480,8 +476,7 @@ sctp_init_ifns_for_vrf(int vrfid) #endif } #endif - -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) static void sctp_init_ifns_for_vrf(int vrfid) { @@ -540,7 +535,7 @@ sctp_init_ifns_for_vrf(int vrfid) } 
else { ifa_flags = 0; } - snprintf(name, SCTP_IFNAMSIZ, "%s%d", ifnet_name(ifn), ifnet_unit(ifn)); + SCTP_SNPRINTF(name, SCTP_IFNAMSIZ, "%s%d", ifnet_name(ifn), ifnet_unit(ifn)); sctp_ifa = sctp_add_addr_to_vrf(vrfid, (void *)ifn, /* XXX */ ifnet_index(ifn), @@ -559,8 +554,7 @@ sctp_init_ifns_for_vrf(int vrfid) ifnet_list_free(ifnetlist); } #endif - -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) static void sctp_init_ifns_for_vrf(int vrfid) { @@ -716,16 +710,15 @@ sctp_addr_change(struct ifaddr *ifa, int cmd) } if (cmd == RTM_ADD) { (void)sctp_add_addr_to_vrf(SCTP_DEFAULT_VRFID, (void *)ifa->ifa_ifp, -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) ifnet_index(ifa->ifa_ifp), ifnet_type(ifa->ifa_ifp), ifnet_name(ifa->ifa_ifp), #else ifa->ifa_ifp->if_index, ifa->ifa_ifp->if_type, ifa->ifa_ifp->if_xname, #endif (void *)ifa, ifa->ifa_addr, ifa_flags, 1); } else { - sctp_del_addr_from_vrf(SCTP_DEFAULT_VRFID, ifa->ifa_addr, -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) ifnet_index(ifa->ifa_ifp), ifnet_name(ifa->ifa_ifp)); #else @@ -740,31 +733,13 @@ sctp_addr_change(struct ifaddr *ifa, int cmd) #endif } -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) void sctp_addr_change_event_handler(void *arg __unused, struct ifaddr *ifa, int cmd) { sctp_addr_change(ifa, cmd); } - -void -sctp_add_or_del_interfaces(int (*pred)(struct ifnet *), int add) -{ - struct ifnet *ifn; - struct ifaddr *ifa; - - IFNET_RLOCK(); - CK_STAILQ_FOREACH(ifn, &MODULE_GLOBAL(ifnet), if_link) { - if (!(*pred)(ifn)) { - continue; - } - CK_STAILQ_FOREACH(ifa, &ifn->if_addrhead, ifa_link) { - sctp_addr_change(ifa, add ? RTM_ADD : RTM_DELETE); - } - } - IFNET_RUNLOCK(); -} #endif -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) void sctp_add_or_del_interfaces(int (*pred)(struct ifnet *), int add) { @@ -797,7 +772,7 @@ sctp_get_mbuf_for_msg(unsigned int space_needed, int want_header, int how, int allonebuf, int type) { struct mbuf *m = NULL; -#if defined(__FreeBSD__) && __FreeBSD_version > 1100052 || defined(__Userspace__) +#if defined(__FreeBSD__) || defined(__Userspace__) #if defined(__Userspace__) m = m_getm2(NULL, space_needed, how, type, want_header ? M_PKTHDR : 0, allonebuf); #else @@ -813,7 +788,7 @@ sctp_get_mbuf_for_msg(unsigned int space_needed, int want_header, m_freem(m); return (NULL); } - KASSERT(SCTP_BUF_NEXT(m) == NULL, ("%s: no chain allowed", __FUNCTION__)); + KASSERT(SCTP_BUF_NEXT(m) == NULL, ("%s: no chain allowed", __func__)); } #endif #ifdef SCTP_MBUF_LOGGING @@ -867,7 +842,6 @@ sctp_get_mbuf_for_msg(unsigned int space_needed, int want_header, return (m); } - #ifdef SCTP_PACKET_LOGGING void sctp_packet_log(struct mbuf *m) @@ -943,7 +917,6 @@ sctp_packet_log(struct mbuf *m) SCTP_BASE_VAR(packet_log_end)); SCTP_BASE_VAR(packet_log_end) = 0; goto no_log; - } lenat = (int *)&SCTP_BASE_VAR(packet_log_buffer)[thisbegin]; *lenat = total_len; @@ -969,7 +942,6 @@ sctp_packet_log(struct mbuf *m) atomic_subtract_int(&SCTP_BASE_VAR(packet_log_writers), 1); } - int sctp_copy_out_packet_log(uint8_t *target, int length) { diff --git a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_bsd_addr.h b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_bsd_addr.h index c43823cb2..d6ab91b5b 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_bsd_addr.h +++ b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_bsd_addr.h @@ -32,9 +32,9 @@ * THE POSSIBILITY OF SUCH DAMAGE. 
*/ -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) #include -__FBSDID("$FreeBSD: head/sys/netinet/sctp_bsd_addr.h 353480 2019-10-13 18:17:08Z tuexen $"); +__FBSDID("$FreeBSD: head/sys/netinet/sctp_bsd_addr.h 365071 2020-09-01 21:19:14Z mjg $"); #endif #ifndef _NETINET_SCTP_BSD_ADDR_H_ @@ -49,7 +49,6 @@ void sctp_wakeup_iterator(void); void sctp_startup_iterator(void); - #ifdef INET6 void sctp_gather_internal_ifa_flags(struct sctp_ifa *ifa); #endif @@ -61,10 +60,8 @@ int sctp_copy_out_packet_log(uint8_t *target, int length); #endif -#if !defined(__Panda__) void sctp_addr_change(struct ifaddr *ifa, int cmd); -#endif -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) void sctp_addr_change_event_handler(void *, struct ifaddr *, int); #endif diff --git a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_callout.c b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_callout.c index c6cffc783..4c9be7568 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_callout.c +++ b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_callout.c @@ -34,12 +34,12 @@ #if defined(__Userspace__) #include -#if !defined (__Userspace_os_Windows) +#if !defined(_WIN32) #include #include #include #endif -#if defined(__Userspace_os_NaCl) +#if defined(__native_client__) #include #endif #include @@ -54,6 +54,7 @@ #include #include #endif +#include /* * Callout/Timer routines for OS that doesn't have them @@ -86,17 +87,20 @@ sctp_os_timer_init(sctp_os_timer_t *c) memset(c, 0, sizeof(*c)); } -void +int sctp_os_timer_start(sctp_os_timer_t *c, uint32_t to_ticks, void (*ftn) (void *), void *arg) { + int ret = 0; + /* paranoia */ if ((c == NULL) || (ftn == NULL)) - return; + return (ret); SCTP_TIMERQ_LOCK(); /* check to see if we're rescheduling a timer */ if (c->c_flags & SCTP_CALLOUT_PENDING) { + ret = 1; if (c == sctp_os_timer_next) { sctp_os_timer_next = TAILQ_NEXT(c, tqe); } @@ -122,6 +126,7 @@ sctp_os_timer_start(sctp_os_timer_t *c, uint32_t to_ticks, void (*ftn) (void *), c->c_time = ticks + to_ticks; TAILQ_INSERT_TAIL(&SCTP_BASE_INFO(callqueue), c, tqe); SCTP_TIMERQ_UNLOCK(); + return (ret); } int @@ -175,7 +180,7 @@ sctp_handle_tick(uint32_t elapsed_ticks) SCTP_TIMERQ_UNLOCK(); } -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) void sctp_timeout(void *arg SCTP_UNUSED) { @@ -192,7 +197,7 @@ user_sctp_timer_iterate(void *arg) { sctp_userspace_set_threadname("SCTP timer"); for (;;) { -#if defined (__Userspace_os_Windows) +#if defined(_WIN32) Sleep(TIMEOUT_INTERVAL); #else struct timespec amount, remaining; @@ -206,13 +211,13 @@ user_sctp_timer_iterate(void *arg) if (atomic_cmpset_int(&SCTP_BASE_VAR(timer_thread_should_exit), 1, 1)) { break; } - sctp_handle_tick(MSEC_TO_TICKS(TIMEOUT_INTERVAL)); + sctp_handle_tick(sctp_msecs_to_ticks(TIMEOUT_INTERVAL)); } return (NULL); } void -sctp_start_timer(void) +sctp_start_timer_thread(void) { /* * No need to do SCTP_TIMERQ_LOCK_INIT(); @@ -223,7 +228,22 @@ sctp_start_timer(void) rc = sctp_userspace_thread_create(&SCTP_BASE_VAR(timer_thread), user_sctp_timer_iterate); if (rc) { SCTP_PRINTF("ERROR; return code from sctp_thread_create() is %d\n", rc); + } else { + SCTP_BASE_VAR(timer_thread_started) = 1; } } +void +sctp_stop_timer_thread(void) +{ + atomic_cmpset_int(&SCTP_BASE_VAR(timer_thread_should_exit), 0, 1); + if (SCTP_BASE_VAR(timer_thread_started)) { +#if defined(_WIN32) + WaitForSingleObject(SCTP_BASE_VAR(timer_thread), INFINITE); + CloseHandle(SCTP_BASE_VAR(timer_thread)); 
+#else + pthread_join(SCTP_BASE_VAR(timer_thread), NULL); +#endif + } +} #endif diff --git a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_callout.h b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_callout.h index 13279da05..81fd8530d 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_callout.h +++ b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_callout.h @@ -30,7 +30,7 @@ * SUCH DAMAGE. */ -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) #include __FBSDID("$FreeBSD$"); #endif @@ -53,7 +53,7 @@ __FBSDID("$FreeBSD$"); #define SCTP_TICKS_PER_FASTTIMO 20 /* called about every 20ms */ #if defined(__Userspace__) -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) #define SCTP_TIMERQ_LOCK() EnterCriticalSection(&SCTP_BASE_VAR(timer_mtx)) #define SCTP_TIMERQ_UNLOCK() LeaveCriticalSection(&SCTP_BASE_VAR(timer_mtx)) #define SCTP_TIMERQ_LOCK_INIT() InitializeCriticalSection(&SCTP_BASE_VAR(timer_mtx)) @@ -88,11 +88,18 @@ typedef struct sctp_callout sctp_os_timer_t; #define SCTP_CALLOUT_PENDING 0x0004 /* callout is waiting for timeout */ void sctp_os_timer_init(sctp_os_timer_t *tmr); -void sctp_os_timer_start(sctp_os_timer_t *, uint32_t, void (*)(void *), void *); +/* Returns 1 if pending timer was rescheduled, 0 otherwise. */ +int sctp_os_timer_start(sctp_os_timer_t *, uint32_t, void (*)(void *), void *); +/* Returns 1 if pending timer was stopped, 0 otherwise. */ int sctp_os_timer_stop(sctp_os_timer_t *); void sctp_handle_tick(uint32_t); #define SCTP_OS_TIMER_INIT sctp_os_timer_init +/* + * NOTE: The next two shouldn't be called directly outside of sctp_timer_start() + * and sctp_timer_stop(), since they don't handle incrementing/decrementing + * relevant reference counts. + */ #define SCTP_OS_TIMER_START sctp_os_timer_start #define SCTP_OS_TIMER_STOP sctp_os_timer_stop /* MT FIXME: Is the following correct? */ @@ -102,9 +109,10 @@ void sctp_handle_tick(uint32_t); #define SCTP_OS_TIMER_DEACTIVATE(tmr) ((tmr)->c_flags &= ~SCTP_CALLOUT_ACTIVE) #if defined(__Userspace__) -void sctp_start_timer(void); +void sctp_start_timer_thread(void); +void sctp_stop_timer_thread(void); #endif -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) void sctp_timeout(void *); #endif diff --git a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_cc_functions.c b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_cc_functions.c index 013905793..68d53bd9e 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_cc_functions.c +++ b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_cc_functions.c @@ -32,9 +32,9 @@ * THE POSSIBILITY OF SUCH DAMAGE. 
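/* Illustrative sketch (not part of the patch): the stop-and-join shape of
 * the new sctp_stop_timer_thread() - set an exit flag, then wait for the
 * worker to leave its loop before returning. POSIX-only here for brevity;
 * the patch also carries a WaitForSingleObject()/CloseHandle() branch for
 * Windows. Flag and thread names below are hypothetical. */
#include <pthread.h>
#include <stdatomic.h>
#include <unistd.h>

static atomic_int worker_should_exit;
static pthread_t  worker_thread;

static void *worker_loop(void *arg) {
    (void) arg;
    while (!atomic_load(&worker_should_exit)) {
        usleep(10 * 1000);        /* stand-in for one 10 ms timer tick */
    }
    return NULL;
}

static int start_worker(void) {
    return pthread_create(&worker_thread, NULL, worker_loop, NULL);
}

static void stop_worker(void) {
    atomic_store(&worker_should_exit, 1); /* ask the loop to finish */
    pthread_join(worker_thread, NULL);    /* block until it actually has */
}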
*/ -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) #include -__FBSDID("$FreeBSD: head/sys/netinet/sctp_cc_functions.c 356660 2020-01-12 15:45:27Z tuexen $"); +__FBSDID("$FreeBSD$"); #endif #include @@ -50,7 +50,7 @@ __FBSDID("$FreeBSD: head/sys/netinet/sctp_cc_functions.c 356660 2020-01-12 15:45 #include #include #include -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) #include #endif @@ -64,7 +64,7 @@ sctp_enforce_cwnd_limit(struct sctp_association *assoc, struct sctp_nets *net) if ((assoc->max_cwnd > 0) && (net->cwnd > assoc->max_cwnd) && (net->cwnd > (net->mtu - sizeof(struct sctphdr)))) { - net->cwnd = assoc->max_cwnd ; + net->cwnd = assoc->max_cwnd; if (net->cwnd < (net->mtu - sizeof(struct sctphdr))) { net->cwnd = net->mtu - sizeof(struct sctphdr); } @@ -101,7 +101,7 @@ sctp_set_initial_cc_param(struct sctp_tcb *stcb, struct sctp_nets *net) } sctp_enforce_cwnd_limit(assoc, net); net->ssthresh = assoc->peers_rwnd; -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) SDT_PROBE5(sctp, cwnd, net, init, stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net, 0, net->cwnd); @@ -163,7 +163,6 @@ sctp_cwnd_update_after_fr(struct sctp_tcb *stcb, (uint64_t)net->mtu * (uint64_t)net->ssthresh) / (uint64_t)t_ssthresh); - } if (asoc->sctp_cmt_on_off == SCTP_CMT_RPV2) { uint32_t srtt; @@ -196,7 +195,7 @@ sctp_cwnd_update_after_fr(struct sctp_tcb *stcb, } net->cwnd = net->ssthresh; sctp_enforce_cwnd_limit(asoc, net); -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) SDT_PROBE5(sctp, cwnd, net, fr, stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net, old_cwnd, net->cwnd); @@ -251,22 +250,21 @@ sctp_cwnd_update_after_fr(struct sctp_tcb *stcb, #define SCTP_INST_NEUTRAL 2 /* Neutral, no indication */ #define SCTP_INST_GAINING 3 /* Gaining, step down possible */ - -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) static int cc_bw_same(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw, - uint64_t rtt_offset, uint64_t vtag, uint8_t inst_ind) + uint64_t rtt_offset, uint64_t vtag, uint8_t inst_ind) #else static int cc_bw_same(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net, uint64_t nbw, - uint64_t rtt_offset, uint8_t inst_ind) + uint64_t rtt_offset, uint8_t inst_ind) #endif { -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) uint64_t oth, probepoint; #endif -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) probepoint = (((uint64_t)net->cwnd) << 32); #endif if (net->rtt > net->cc_mod.rtcc.lbw_rtt + rtt_offset) { @@ -275,7 +273,7 @@ cc_bw_same(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net, uint64_t nb * we don't update bw.. so we don't * update the rtt either. 
*/ -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) /* Probe point 5 */ probepoint |= ((5 << 16) | 1); SDT_PROBE5(sctp, cwnd, net, rttvar, @@ -295,7 +293,7 @@ cc_bw_same(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net, uint64_t nb ((net->cc_mod.rtcc.step_cnt > net->cc_mod.rtcc.steady_step) && ((net->cc_mod.rtcc.step_cnt % net->cc_mod.rtcc.steady_step) == 0))) { /* Try a step down */ -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) oth = net->cc_mod.rtcc.vol_reduce; oth <<= 16; oth |= net->cc_mod.rtcc.step_cnt; @@ -324,7 +322,7 @@ cc_bw_same(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net, uint64_t nb * we update both the bw and the rtt here to * lock this in as a good step down. */ -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) /* Probe point 6 */ probepoint |= ((6 << 16) | 0); SDT_PROBE5(sctp, cwnd, net, rttvar, @@ -335,7 +333,7 @@ cc_bw_same(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net, uint64_t nb probepoint); #endif if (net->cc_mod.rtcc.steady_step) { -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) oth = net->cc_mod.rtcc.vol_reduce; oth <<= 16; oth |= net->cc_mod.rtcc.step_cnt; @@ -370,7 +368,7 @@ cc_bw_same(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net, uint64_t nb } /* Ok bw and rtt remained the same .. no update to any */ -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) /* Probe point 7 */ probepoint |= ((7 << 16) | net->cc_mod.rtcc.ret_from_eq); SDT_PROBE5(sctp, cwnd, net, rttvar, @@ -407,22 +405,22 @@ cc_bw_same(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net, uint64_t nb return ((int)net->cc_mod.rtcc.ret_from_eq); } -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) static int cc_bw_decrease(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw, uint64_t rtt_offset, - uint64_t vtag, uint8_t inst_ind) + uint64_t vtag, uint8_t inst_ind) #else static int cc_bw_decrease(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net, uint64_t nbw, uint64_t rtt_offset, - uint8_t inst_ind) + uint8_t inst_ind) #endif { -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) uint64_t oth, probepoint; #endif /* Bandwidth decreased.*/ -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) probepoint = (((uint64_t)net->cwnd) << 32); #endif if (net->rtt > net->cc_mod.rtcc.lbw_rtt+rtt_offset) { @@ -431,7 +429,7 @@ cc_bw_decrease(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net, uint64_ if ((net->cwnd > net->cc_mod.rtcc.cwnd_at_bw_set) && (inst_ind != SCTP_INST_LOOSING)) { /* We caused it maybe.. back off? 
*/ -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) /* PROBE POINT 1 */ probepoint |= ((1 << 16) | 1); SDT_PROBE5(sctp, cwnd, net, rttvar, @@ -448,7 +446,7 @@ cc_bw_decrease(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net, uint64_ } return (1); } -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) /* Probe point 2 */ probepoint |= ((2 << 16) | 0); SDT_PROBE5(sctp, cwnd, net, rttvar, @@ -460,7 +458,7 @@ cc_bw_decrease(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net, uint64_ #endif /* Someone else - fight for more? */ if (net->cc_mod.rtcc.steady_step) { -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) oth = net->cc_mod.rtcc.vol_reduce; oth <<= 16; oth |= net->cc_mod.rtcc.step_cnt; @@ -488,7 +486,7 @@ cc_bw_decrease(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net, uint64_ goto out_decision; } else if (net->rtt < net->cc_mod.rtcc.lbw_rtt-rtt_offset) { /* bw & rtt decreased */ -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) /* Probe point 3 */ probepoint |= ((3 << 16) | 0); SDT_PROBE5(sctp, cwnd, net, rttvar, @@ -499,7 +497,7 @@ cc_bw_decrease(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net, uint64_ probepoint); #endif if (net->cc_mod.rtcc.steady_step) { -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) oth = net->cc_mod.rtcc.vol_reduce; oth <<= 16; oth |= net->cc_mod.rtcc.step_cnt; @@ -524,7 +522,7 @@ cc_bw_decrease(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net, uint64_ goto out_decision; } /* The bw decreased but rtt stayed the same */ -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) /* Probe point 4 */ probepoint |= ((4 << 16) | 0); SDT_PROBE5(sctp, cwnd, net, rttvar, @@ -535,7 +533,7 @@ cc_bw_decrease(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net, uint64_ probepoint); #endif if (net->cc_mod.rtcc.steady_step) { -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) oth = net->cc_mod.rtcc.vol_reduce; oth <<= 16; oth |= net->cc_mod.rtcc.step_cnt; @@ -568,7 +566,7 @@ out_decision: } } -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) static int cc_bw_increase(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw, uint64_t vtag) #else @@ -576,7 +574,7 @@ static int cc_bw_increase(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net, uint64_t nbw) #endif { -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) uint64_t oth, probepoint; #endif @@ -586,7 +584,7 @@ cc_bw_increase(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net, uint64_ * update. Note that we pay no attention to * the inst_ind since our overall sum is increasing. 
*/ -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) /* PROBE POINT 0 */ probepoint = (((uint64_t)net->cwnd) << 32); SDT_PROBE5(sctp, cwnd, net, rttvar, @@ -597,7 +595,7 @@ cc_bw_increase(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net, uint64_ probepoint); #endif if (net->cc_mod.rtcc.steady_step) { -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) oth = net->cc_mod.rtcc.vol_reduce; oth <<= 16; oth |= net->cc_mod.rtcc.step_cnt; @@ -627,7 +625,7 @@ static int cc_bw_limit(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw) { uint64_t bw_offset, rtt_offset; -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) uint64_t probepoint, rtt, vtag; #endif uint64_t bytes_for_this_rtt, inst_bw; @@ -673,7 +671,7 @@ cc_bw_limit(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw) * change within 1/32nd */ bw_shift = SCTP_BASE_SYSCTL(sctp_rttvar_bw); -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) rtt = stcb->asoc.my_vtag; vtag = (rtt << 32) | (((uint32_t)(stcb->sctp_ep->sctp_lport)) << 16) | (stcb->rport); probepoint = (((uint64_t)net->cwnd) << 32); @@ -694,12 +692,12 @@ cc_bw_limit(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw) inst_ind = SCTP_INST_LOOSING; else inst_ind = SCTP_INST_NEUTRAL; -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) probepoint |= ((0xb << 16) | inst_ind); #endif } else { inst_ind = net->cc_mod.rtcc.last_inst_ind; -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) inst_bw = bytes_for_this_rtt / (uint64_t)(net->rtt); /* Can't determine do not change */ probepoint |= ((0xc << 16) | inst_ind); @@ -707,13 +705,13 @@ cc_bw_limit(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw) } } else { inst_ind = net->cc_mod.rtcc.last_inst_ind; -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) inst_bw = bytes_for_this_rtt; /* Can't determine do not change */ probepoint |= ((0xd << 16) | inst_ind); #endif } -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) SDT_PROBE5(sctp, cwnd, net, rttvar, vtag, ((nbw << 32) | inst_bw), @@ -727,7 +725,7 @@ cc_bw_limit(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw) } bw_offset = net->cc_mod.rtcc.lbw >> bw_shift; if (nbw > net->cc_mod.rtcc.lbw + bw_offset) { -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) ret = cc_bw_increase(stcb, net, nbw, vtag); #else ret = cc_bw_increase(stcb, net, nbw); @@ -736,7 +734,7 @@ cc_bw_limit(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw) } rtt_offset = net->cc_mod.rtcc.lbw_rtt >> SCTP_BASE_SYSCTL(sctp_rttvar_rtt); if (nbw < net->cc_mod.rtcc.lbw - bw_offset) { -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) ret = cc_bw_decrease(stcb, net, nbw, rtt_offset, vtag, inst_ind); #else ret = cc_bw_decrease(stcb, net, nbw, rtt_offset, inst_ind); @@ -747,7 +745,7 @@ cc_bw_limit(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw) * we are in a situation where * the bw stayed the same. 
*/ -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) ret = cc_bw_same(stcb, net, nbw, rtt_offset, vtag, inst_ind); #else ret = cc_bw_same(stcb, net, nbw, rtt_offset, inst_ind); @@ -763,10 +761,10 @@ sctp_cwnd_update_after_sack_common(struct sctp_tcb *stcb, int accum_moved, int reneged_all SCTP_UNUSED, int will_exit, int use_rtcc) { struct sctp_nets *net; -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) int old_cwnd; #endif - uint32_t t_ssthresh, t_cwnd, incr; + uint32_t t_ssthresh, incr; uint64_t t_ucwnd_sbw; uint64_t t_path_mptcp; uint64_t mptcp_like_alpha; @@ -775,7 +773,6 @@ sctp_cwnd_update_after_sack_common(struct sctp_tcb *stcb, /* MT FIXME: Don't compute this over and over again */ t_ssthresh = 0; - t_cwnd = 0; t_ucwnd_sbw = 0; t_path_mptcp = 0; mptcp_like_alpha = 1; @@ -785,7 +782,6 @@ sctp_cwnd_update_after_sack_common(struct sctp_tcb *stcb, max_path = 0; TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { t_ssthresh += net->ssthresh; - t_cwnd += net->cwnd; /* lastsa>>3; we don't need to devide ...*/ srtt = net->lastsa; if (srtt > 0) { @@ -817,7 +813,6 @@ sctp_cwnd_update_after_sack_common(struct sctp_tcb *stcb, /* update cwnd and Early FR */ /******************************/ TAILQ_FOREACH(net, &asoc->nets, sctp_next) { - #ifdef JANA_CMT_FAST_RECOVERY /* * CMT fast recovery code. Need to debug. @@ -837,7 +832,7 @@ sctp_cwnd_update_after_sack_common(struct sctp_tcb *stcb, continue; } #ifdef JANA_CMT_FAST_RECOVERY - /* CMT fast recovery code + /* CMT fast recovery code */ /* if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery && net->will_exit_fast_recovery == 0) { @@ -879,7 +874,7 @@ sctp_cwnd_update_after_sack_common(struct sctp_tcb *stcb, continue; } } else { -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) uint64_t vtag, probepoint; probepoint = (((uint64_t)net->cwnd) << 32); @@ -915,7 +910,7 @@ sctp_cwnd_update_after_sack_common(struct sctp_tcb *stcb, if (net->flight_size + net->net_ack >= net->cwnd) { uint32_t limit; -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) old_cwnd = net->cwnd; #endif switch (asoc->sctp_cmt_on_off) { @@ -987,7 +982,7 @@ sctp_cwnd_update_after_sack_common(struct sctp_tcb *stcb, sctp_log_cwnd(stcb, net, incr, SCTP_CWND_LOG_FROM_SS); } -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) SDT_PROBE5(sctp, cwnd, net, ack, stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), @@ -1008,9 +1003,9 @@ sctp_cwnd_update_after_sack_common(struct sctp_tcb *stcb, net->partial_bytes_acked += net->net_ack; if ((net->flight_size + net->net_ack >= net->cwnd) && - (net->partial_bytes_acked >= net->cwnd)) { + (net->partial_bytes_acked >= net->cwnd)) { net->partial_bytes_acked -= net->cwnd; -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) old_cwnd = net->cwnd; #endif switch (asoc->sctp_cmt_on_off) { @@ -1051,7 +1046,7 @@ sctp_cwnd_update_after_sack_common(struct sctp_tcb *stcb, } net->cwnd += incr; sctp_enforce_cwnd_limit(asoc, net); -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) SDT_PROBE5(sctp, cwnd, net, ack, stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), @@ -1078,7 +1073,7 @@ 
sctp_cwnd_update_after_sack_common(struct sctp_tcb *stcb, } } -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) static void sctp_cwnd_update_exit_pf_common(struct sctp_tcb *stcb, struct sctp_nets *net) #else @@ -1086,13 +1081,13 @@ static void sctp_cwnd_update_exit_pf_common(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net) #endif { -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) int old_cwnd; old_cwnd = net->cwnd; #endif net->cwnd = net->mtu; -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) SDT_PROBE5(sctp, cwnd, net, ack, stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net, old_cwnd, net->cwnd); @@ -1101,7 +1096,6 @@ sctp_cwnd_update_exit_pf_common(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_n (void *)net, net->cwnd); } - static void sctp_cwnd_update_after_timeout(struct sctp_tcb *stcb, struct sctp_nets *net) { @@ -1165,7 +1159,7 @@ sctp_cwnd_update_after_timeout(struct sctp_tcb *stcb, struct sctp_nets *net) } net->cwnd = net->mtu; net->partial_bytes_acked = 0; -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) SDT_PROBE5(sctp, cwnd, net, to, stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), @@ -1179,7 +1173,7 @@ sctp_cwnd_update_after_timeout(struct sctp_tcb *stcb, struct sctp_nets *net) static void sctp_cwnd_update_after_ecn_echo_common(struct sctp_tcb *stcb, struct sctp_nets *net, - int in_window, int num_pkt_lost, int use_rtcc) + int in_window, int num_pkt_lost, int use_rtcc) { int old_cwnd = net->cwnd; if ((use_rtcc) && (net->lan_type == SCTP_LAN_LOCAL) && (net->cc_mod.rtcc.use_dccc_ecn)) { @@ -1207,10 +1201,9 @@ sctp_cwnd_update_after_ecn_echo_common(struct sctp_tcb *stcb, struct sctp_nets * if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT); } - } SCTP_STAT_INCR(sctps_ecnereducedcwnd); - } else { + } else { if (in_window == 0) { SCTP_STAT_INCR(sctps_ecnereducedcwnd); net->ssthresh = net->cwnd / 2; @@ -1220,7 +1213,7 @@ sctp_cwnd_update_after_ecn_echo_common(struct sctp_tcb *stcb, struct sctp_nets * net->RTO <<= 1; } net->cwnd = net->ssthresh; -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) SDT_PROBE5(sctp, cwnd, net, ecn, stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), @@ -1341,7 +1334,7 @@ sctp_cwnd_update_after_packet_dropped(struct sctp_tcb *stcb, sctp_enforce_cwnd_limit(&stcb->asoc, net); if (net->cwnd - old_cwnd != 0) { /* log only changes */ -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) SDT_PROBE5(sctp, cwnd, net, pd, stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), @@ -1357,7 +1350,7 @@ sctp_cwnd_update_after_packet_dropped(struct sctp_tcb *stcb, static void sctp_cwnd_update_after_output(struct sctp_tcb *stcb, - struct sctp_nets *net, int burst_limit) + struct sctp_nets *net, int burst_limit) { int old_cwnd = net->cwnd; @@ -1366,7 +1359,7 @@ sctp_cwnd_update_after_output(struct sctp_tcb *stcb, if (burst_limit) { net->cwnd = (net->flight_size + (burst_limit * net->mtu)); sctp_enforce_cwnd_limit(&stcb->asoc, net); -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && 
!defined(__Userspace__) SDT_PROBE5(sctp, cwnd, net, bl, stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), @@ -1381,8 +1374,8 @@ sctp_cwnd_update_after_output(struct sctp_tcb *stcb, static void sctp_cwnd_update_after_sack(struct sctp_tcb *stcb, - struct sctp_association *asoc, - int accum_moved, int reneged_all, int will_exit) + struct sctp_association *asoc, + int accum_moved, int reneged_all, int will_exit) { /* Passing a zero argument in last disables the rtcc algorithm */ sctp_cwnd_update_after_sack_common(stcb, asoc, accum_moved, reneged_all, will_exit, 0); @@ -1390,7 +1383,7 @@ sctp_cwnd_update_after_sack(struct sctp_tcb *stcb, static void sctp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net, - int in_window, int num_pkt_lost) + int in_window, int num_pkt_lost) { /* Passing a zero argument in last disables the rtcc algorithm */ sctp_cwnd_update_after_ecn_echo_common(stcb, net, in_window, num_pkt_lost, 0); @@ -1403,22 +1396,20 @@ sctp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net, */ static void sctp_cwnd_update_rtcc_after_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net, - int in_window, int num_pkt_lost) + int in_window, int num_pkt_lost) { sctp_cwnd_update_after_ecn_echo_common(stcb, net, in_window, num_pkt_lost, 1); } - -static -void sctp_cwnd_update_rtcc_tsn_acknowledged(struct sctp_nets *net, - struct sctp_tmit_chunk *tp1) +static void sctp_cwnd_update_rtcc_tsn_acknowledged(struct sctp_nets *net, + struct sctp_tmit_chunk *tp1) { net->cc_mod.rtcc.bw_bytes += tp1->send_size; } static void sctp_cwnd_prepare_rtcc_net_for_sack(struct sctp_tcb *stcb SCTP_UNUSED, - struct sctp_nets *net) + struct sctp_nets *net) { if (net->cc_mod.rtcc.tls_needs_set > 0) { /* We had a bw measurment going on */ @@ -1431,14 +1422,14 @@ sctp_cwnd_prepare_rtcc_net_for_sack(struct sctp_tcb *stcb SCTP_UNUSED, static void sctp_cwnd_new_rtcc_transmission_begins(struct sctp_tcb *stcb, - struct sctp_nets *net) + struct sctp_nets *net) { -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) uint64_t vtag, probepoint; #endif if (net->cc_mod.rtcc.lbw) { -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) /* Clear the old bw.. 
we went to 0 in-flight */ vtag = (net->rtt << 32) | (((uint32_t)(stcb->sctp_ep->sctp_lport)) << 16) | (stcb->rport); @@ -1492,15 +1483,15 @@ sctp_cwnd_new_rtcc_transmission_begins(struct sctp_tcb *stcb, static void sctp_set_rtcc_initial_cc_param(struct sctp_tcb *stcb, - struct sctp_nets *net) + struct sctp_nets *net) { -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) uint64_t vtag, probepoint; #endif sctp_set_initial_cc_param(stcb, net); stcb->asoc.use_precise_time = 1; -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 +#if defined(__FreeBSD__) && !defined(__Userspace__) probepoint = (((uint64_t)net->cwnd) << 32); probepoint |= ((9 << 16) | 0); vtag = (net->rtt << 32) | @@ -1527,15 +1518,14 @@ sctp_set_rtcc_initial_cc_param(struct sctp_tcb *stcb, net->cc_mod.rtcc.use_dccc_ecn = SCTP_BASE_SYSCTL(sctp_use_dccc_ecn); net->cc_mod.rtcc.step_cnt = 0; net->cc_mod.rtcc.last_step_state = 0; - - } static int sctp_cwnd_rtcc_socket_option(struct sctp_tcb *stcb, int setorget, - struct sctp_cc_option *cc_opt) + struct sctp_cc_option *cc_opt) { struct sctp_nets *net; + if (setorget == 1) { /* a set */ if (cc_opt->option == SCTP_CC_OPT_RTCC_SETMODE) { @@ -1600,8 +1590,8 @@ sctp_cwnd_update_rtcc_packet_transmitted(struct sctp_tcb *stcb SCTP_UNUSED, static void sctp_cwnd_update_rtcc_after_sack(struct sctp_tcb *stcb, - struct sctp_association *asoc, - int accum_moved, int reneged_all, int will_exit) + struct sctp_association *asoc, + int accum_moved, int reneged_all, int will_exit) { /* Passing a one argument at the last enables the rtcc algorithm */ sctp_cwnd_update_after_sack_common(stcb, asoc, accum_moved, reneged_all, will_exit, 1); @@ -1842,15 +1832,14 @@ sctp_hs_cwnd_update_after_fr(struct sctp_tcb *stcb, static void sctp_hs_cwnd_update_after_sack(struct sctp_tcb *stcb, - struct sctp_association *asoc, - int accum_moved, int reneged_all SCTP_UNUSED, int will_exit) + struct sctp_association *asoc, + int accum_moved, int reneged_all SCTP_UNUSED, int will_exit) { struct sctp_nets *net; /******************************/ /* update cwnd and Early FR */ /******************************/ TAILQ_FOREACH(net, &asoc->nets, sctp_next) { - #ifdef JANA_CMT_FAST_RECOVERY /* * CMT fast recovery code. Need to debug. @@ -1870,7 +1859,7 @@ sctp_hs_cwnd_update_after_sack(struct sctp_tcb *stcb, continue; } #ifdef JANA_CMT_FAST_RECOVERY - /* CMT fast recovery code + /* CMT fast recovery code */ /* if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery && net->will_exit_fast_recovery == 0) { @@ -1934,7 +1923,6 @@ sctp_hs_cwnd_update_after_sack(struct sctp_tcb *stcb, } } - /* * H-TCP congestion control. 
The algorithm is detailed in: * R.N.Shorten, D.J.Leith: @@ -1943,7 +1931,6 @@ sctp_hs_cwnd_update_after_sack(struct sctp_tcb *stcb, * http://www.hamilton.ie/net/htcp3.pdf */ - static int use_rtt_scaling = 1; static int use_bandwidth_switch = 1; @@ -2000,7 +1987,7 @@ measure_rtt(struct sctp_nets *net) if (net->fast_retran_ip == 0 && net->ssthresh < 0xFFFF && htcp_ccount(&net->cc_mod.htcp_ca) > 3) { if (net->cc_mod.htcp_ca.maxRTT < net->cc_mod.htcp_ca.minRTT) net->cc_mod.htcp_ca.maxRTT = net->cc_mod.htcp_ca.minRTT; - if (net->cc_mod.htcp_ca.maxRTT < srtt && srtt <= net->cc_mod.htcp_ca.maxRTT+MSEC_TO_TICKS(20)) + if (net->cc_mod.htcp_ca.maxRTT < srtt && srtt <= net->cc_mod.htcp_ca.maxRTT+sctp_msecs_to_ticks(20)) net->cc_mod.htcp_ca.maxRTT = srtt; } } @@ -2060,7 +2047,7 @@ htcp_beta_update(struct htcp *ca, uint32_t minRTT, uint32_t maxRTT) } } - if (ca->modeswitch && minRTT > (uint32_t)MSEC_TO_TICKS(10) && maxRTT) { + if (ca->modeswitch && minRTT > sctp_msecs_to_ticks(10) && maxRTT) { ca->beta = (minRTT<<7)/maxRTT; if (ca->beta < BETA_MIN) ca->beta = BETA_MIN; @@ -2081,19 +2068,19 @@ htcp_alpha_update(struct htcp *ca) if (diff > (uint32_t)hz) { diff -= hz; - factor = 1+ ( 10*diff + ((diff/2)*(diff/2)/hz))/hz; + factor = 1+ (10 * diff + ((diff / 2) * (diff / 2) / hz)) / hz; } if (use_rtt_scaling && minRTT) { - uint32_t scale = (hz<<3)/(10*minRTT); - scale = min(max(scale, 1U<<2), 10U<<3); /* clamping ratio to interval [0.5,10]<<3 */ - factor = (factor<<3)/scale; - if (!factor) + uint32_t scale = (hz << 3) / (10 * minRTT); + scale = min(max(scale, 1U << 2), 10U << 3); /* clamping ratio to interval [0.5,10]<<3 */ + factor = (factor << 3) / scale; + if (factor != 0) factor = 1; } - ca->alpha = 2*factor*((1<<7)-ca->beta); - if (!ca->alpha) + ca->alpha = 2 * factor * ((1 << 7) - ca->beta); + if (ca->alpha != 0) ca->alpha = ALPHA_BASE; } @@ -2131,10 +2118,10 @@ htcp_cong_avoid(struct sctp_tcb *stcb, struct sctp_nets *net) { /*- * How to handle these functions? - * if (!tcp_is_cwnd_limited(sk, in_flight)) RRS - good question. + * if (!tcp_is_cwnd_limited(sk, in_flight)) RRS - good question. * return; */ - if (net->cwnd <= net->ssthresh) { + if (net->cwnd <= net->ssthresh) { /* We are in slow start */ if (net->flight_size + net->net_ack >= net->cwnd) { if (net->net_ack > (net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable))) { @@ -2150,7 +2137,6 @@ htcp_cong_avoid(struct sctp_tcb *stcb, struct sctp_nets *net) sctp_log_cwnd(stcb, net, net->net_ack, SCTP_CWND_LOG_FROM_SS); } - } sctp_enforce_cwnd_limit(&stcb->asoc, net); } else { @@ -2167,7 +2153,7 @@ htcp_cong_avoid(struct sctp_tcb *stcb, struct sctp_nets *net) */ /* What is snd_cwnd_cnt?? */ if (((net->partial_bytes_acked/net->mtu * net->cc_mod.htcp_ca.alpha) >> 7)*net->mtu >= net->cwnd) { - /*- + /*- * Does SCTP have a cwnd clamp? * if (net->snd_cwnd < net->snd_cwnd_clamp) - Nope (RRS). */ @@ -2238,7 +2224,6 @@ sctp_htcp_cwnd_update_after_sack(struct sctp_tcb *stcb, /* update cwnd and Early FR */ /******************************/ TAILQ_FOREACH(net, &asoc->nets, sctp_next) { - #ifdef JANA_CMT_FAST_RECOVERY /* * CMT fast recovery code. Need to debug. 
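Both HTCP hunks above replace the MSEC_TO_TICKS() macro with a call to sctp_msecs_to_ticks(); the function itself is defined elsewhere in the patch, so only its intended behaviour is visible here. Judging by the macro that sctp_constants.h stops defining further down (a round-up conversion, so a positive time always maps to a positive tick count), an equivalent helper looks roughly like this sketch, where the hz value is only an example:

    #include <stdint.h>

    /* Stand-in for the kernel tick rate; FreeBSD's global 'hz' is commonly 1000. */
    static const uint32_t hz = 1000;

    /* Same round-up conversion the removed MSEC_TO_TICKS() macro performed. */
    static uint32_t
    msecs_to_ticks_sketch(uint32_t msecs)
    {
        if (hz == 1000)
            return msecs;
        return (msecs * hz + 999) / 1000;
    }

With hz = 1000 the call sites above, sctp_msecs_to_ticks(20) and sctp_msecs_to_ticks(10), simply return 20 and 10 ticks.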
@@ -2258,7 +2243,7 @@ sctp_htcp_cwnd_update_after_sack(struct sctp_tcb *stcb, continue; } #ifdef JANA_CMT_FAST_RECOVERY - /* CMT fast recovery code + /* CMT fast recovery code */ /* if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery && net->will_exit_fast_recovery == 0) { @@ -2410,7 +2395,7 @@ sctp_htcp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb, const struct sctp_cc_functions sctp_cc_functions[] = { { -#if defined(__Windows__) || (defined(__Userspace_os_Windows) && !defined(__MINGW32__)) +#if defined(_WIN32) && !defined(__MINGW32__) sctp_set_initial_cc_param, sctp_cwnd_update_after_sack, sctp_cwnd_update_exit_pf_common, @@ -2431,7 +2416,7 @@ const struct sctp_cc_functions sctp_cc_functions[] = { #endif }, { -#if defined(__Windows__) || (defined(__Userspace_os_Windows) && !defined(__MINGW32__)) +#if defined(_WIN32) && !defined(__MINGW32__) sctp_set_initial_cc_param, sctp_hs_cwnd_update_after_sack, sctp_cwnd_update_exit_pf_common, @@ -2452,7 +2437,7 @@ const struct sctp_cc_functions sctp_cc_functions[] = { #endif }, { -#if defined(__Windows__) || (defined(__Userspace_os_Windows) && !defined(__MINGW32__)) +#if defined(_WIN32) && !defined(__MINGW32__) sctp_htcp_set_initial_cc_param, sctp_htcp_cwnd_update_after_sack, sctp_cwnd_update_exit_pf_common, @@ -2473,7 +2458,7 @@ const struct sctp_cc_functions sctp_cc_functions[] = { #endif }, { -#if defined(__Windows__) || (defined(__Userspace_os_Windows) && !defined(__MINGW32__)) +#if defined(_WIN32) && !defined(__MINGW32__) sctp_set_rtcc_initial_cc_param, sctp_cwnd_update_rtcc_after_sack, sctp_cwnd_update_exit_pf_common, diff --git a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_constants.h b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_constants.h index 038a34024..37d6e8f20 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_constants.h +++ b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_constants.h @@ -32,18 +32,18 @@ * THE POSSIBILITY OF SUCH DAMAGE. */ -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) #include -__FBSDID("$FreeBSD: head/sys/netinet/sctp_constants.h 343089 2019-01-16 11:33:47Z tuexen $"); +__FBSDID("$FreeBSD$"); #endif #ifndef _NETINET_SCTP_CONSTANTS_H_ #define _NETINET_SCTP_CONSTANTS_H_ -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) && defined(__Userspace__) extern void getwintimeofday(struct timeval *tv); -#endif +#endif /* IANA assigned port number for SCTP over UDP encapsulation */ #define SCTP_OVER_UDP_TUNNELING_PORT 9899 @@ -92,13 +92,11 @@ extern void getwintimeofday(struct timeval *tv); /* #define SCTP_AUDITING_ENABLED 1 used for debug/auditing */ #define SCTP_AUDIT_SIZE 256 - #define SCTP_KTRHEAD_NAME "sctp_iterator" #define SCTP_KTHREAD_PAGES 0 #define SCTP_MCORE_NAME "sctp_core_worker" - /* If you support Multi-VRF how big to * make the initial array of VRF's to. 
*/ @@ -268,7 +266,6 @@ extern void getwintimeofday(struct timeval *tv); #define SCTP_LOCK_UNKNOWN 2 - /* number of associations by default for zone allocation */ #define SCTP_MAX_NUM_OF_ASOC 40000 /* how many addresses per assoc remote and local */ @@ -393,7 +390,6 @@ extern void getwintimeofday(struct timeval *tv); #define IS_SCTP_CONTROL(a) (((a)->chunk_type != SCTP_DATA) && ((a)->chunk_type != SCTP_IDATA)) #define IS_SCTP_DATA(a) (((a)->chunk_type == SCTP_DATA) || ((a)->chunk_type == SCTP_IDATA)) - /* SCTP parameter types */ /*************0x0000 series*************/ #define SCTP_HEARTBEAT_INFO 0x0001 @@ -458,7 +454,6 @@ extern void getwintimeofday(struct timeval *tv); /* mask to get sticky */ #define SCTP_STICKY_OPTIONS_MASK 0x0c - /* * SCTP states for internal state machine */ @@ -551,24 +546,22 @@ extern void getwintimeofday(struct timeval *tv); #define SCTP_TIMER_TYPE_ASCONF 10 #define SCTP_TIMER_TYPE_SHUTDOWNGUARD 11 #define SCTP_TIMER_TYPE_AUTOCLOSE 12 -#define SCTP_TIMER_TYPE_EVENTWAKE 13 -#define SCTP_TIMER_TYPE_STRRESET 14 -#define SCTP_TIMER_TYPE_INPKILL 15 -#define SCTP_TIMER_TYPE_ASOCKILL 16 -#define SCTP_TIMER_TYPE_ADDR_WQ 17 -#define SCTP_TIMER_TYPE_PRIM_DELETED 18 +#define SCTP_TIMER_TYPE_STRRESET 13 +#define SCTP_TIMER_TYPE_INPKILL 14 +#define SCTP_TIMER_TYPE_ASOCKILL 15 +#define SCTP_TIMER_TYPE_ADDR_WQ 16 +#define SCTP_TIMER_TYPE_PRIM_DELETED 17 /* add new timers here - and increment LAST */ -#define SCTP_TIMER_TYPE_LAST 19 +#define SCTP_TIMER_TYPE_LAST 18 #define SCTP_IS_TIMER_TYPE_VALID(t) (((t) > SCTP_TIMER_TYPE_NONE) && \ ((t) < SCTP_TIMER_TYPE_LAST)) - -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) /* Number of ticks to run the main timer at in msec */ #define SCTP_MAIN_TIMER_DEFAULT 10 -#endif +#endif /* max number of TSN's dup'd that I will hold */ #define SCTP_MAX_DUP_TSNS 20 @@ -587,22 +580,7 @@ extern void getwintimeofday(struct timeval *tv); * number of clusters as a base. This way high bandwidth environments will * not get impacted by the lower bandwidth sending a bunch of 1 byte chunks */ -#ifdef __Panda__ -#define SCTP_ASOC_MAX_CHUNKS_ON_QUEUE 10240 -#else #define SCTP_ASOC_MAX_CHUNKS_ON_QUEUE 512 -#endif - - -/* The conversion from time to ticks and vice versa is done by rounding - * upwards. This way we can test in the code the time to be positive and - * know that this corresponds to a positive number of ticks. - */ -#define MSEC_TO_TICKS(x) ((hz == 1000) ? x : ((((x) * hz) + 999) / 1000)) -#define TICKS_TO_MSEC(x) ((hz == 1000) ? x : ((((x) * 1000) + (hz - 1)) / hz)) - -#define SEC_TO_TICKS(x) ((x) * hz) -#define TICKS_TO_SEC(x) (((x) + (hz - 1)) / hz) /* * Basically the minimum amount of time before I do a early FR. 
Making this @@ -632,8 +610,7 @@ extern void getwintimeofday(struct timeval *tv); #define SCTP_RTO_UPPER_BOUND (60000) /* 60 sec in ms */ #define SCTP_RTO_LOWER_BOUND (1000) /* 1 sec is ms */ -#define SCTP_RTO_INITIAL (3000) /* 3 sec in ms */ - +#define SCTP_RTO_INITIAL (1000) /* 1 sec in ms */ #define SCTP_INP_KILL_TIMEOUT 20 /* number of ms to retry kill of inpcb */ #define SCTP_ASOC_KILL_TIMEOUT 10 /* number of ms to retry kill of inpcb */ @@ -645,7 +622,6 @@ extern void getwintimeofday(struct timeval *tv); #define SCTP_DEF_PMTU_RAISE_SEC 600 /* 10 min between raise attempts */ - /* How many streams I request initially by default */ #define SCTP_OSTREAM_INITIAL 10 #define SCTP_ISTREAM_INITIAL 2048 @@ -728,7 +704,6 @@ extern void getwintimeofday(struct timeval *tv); #define SCTP_NUMBER_OF_SECRETS 8 /* or 8 * 4 = 32 octets */ #define SCTP_SECRET_SIZE 32 /* number of octets in a 256 bits */ - /* * SCTP upper layer notifications */ @@ -759,6 +734,7 @@ extern void getwintimeofday(struct timeval *tv); #define SCTP_NOTIFY_NO_PEER_AUTH 25 #define SCTP_NOTIFY_SENDER_DRY 26 #define SCTP_NOTIFY_REMOTE_ERROR 27 +#define SCTP_NOTIFY_ASSOC_TIMEDOUT 28 /* This is the value for messages that are NOT completely * copied down where we will start to split the message. @@ -769,7 +745,11 @@ extern void getwintimeofday(struct timeval *tv); #define SCTP_DEFAULT_SPLIT_POINT_MIN 2904 /* Maximum length of diagnostic information in error causes */ +#if defined(__Userspace__) +#define SCTP_DIAG_INFO_LEN 256 +#else #define SCTP_DIAG_INFO_LEN 128 +#endif /* ABORT CODES and other tell-tale location * codes are generated by adding the below @@ -787,9 +767,8 @@ extern void getwintimeofday(struct timeval *tv); #define SCTP_FROM_SCTP_ASCONF 0x80000000 #define SCTP_FROM_SCTP_OUTPUT 0x90000000 #define SCTP_FROM_SCTP_PEELOFF 0xa0000000 -#define SCTP_FROM_SCTP_PANDA 0xb0000000 -#define SCTP_FROM_SCTP_SYSCTL 0xc0000000 -#define SCTP_FROM_SCTP_CC_FUNCTIONS 0xd0000000 +#define SCTP_FROM_SCTP_SYSCTL 0xb0000000 +#define SCTP_FROM_SCTP_CC_FUNCTIONS 0xc0000000 /* Location ID's */ #define SCTP_LOC_1 0x00000001 @@ -827,7 +806,8 @@ extern void getwintimeofday(struct timeval *tv); #define SCTP_LOC_33 0x00000021 #define SCTP_LOC_34 0x00000022 #define SCTP_LOC_35 0x00000023 - +#define SCTP_LOC_36 0x00000024 +#define SCTP_LOC_37 0x00000025 /* Free assoc codes */ #define SCTP_NORMAL_PROC 0 @@ -847,7 +827,6 @@ extern void getwintimeofday(struct timeval *tv); #define SCTP_DONOT_SETSCOPE 0 #define SCTP_DO_SETSCOPE 1 - /* This value determines the default for when * we try to add more on the send queue., if * there is room. This prevents us from cycling @@ -874,7 +853,7 @@ extern void getwintimeofday(struct timeval *tv); #define SCTP_CHUNKQUEUE_SCALE 10 #endif -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) /* clock variance is 1 ms */ #define SCTP_CLOCK_GRANULARITY 1 #else @@ -930,7 +909,6 @@ extern void getwintimeofday(struct timeval *tv); } \ } while (0) - #define SCTP_RETRAN_DONE -1 #define SCTP_RETRAN_EXIT -2 @@ -981,12 +959,11 @@ extern void getwintimeofday(struct timeval *tv); /*- * defines for socket lock states. - * Used by __APPLE__ and SCTP_SO_LOCK_TESTING + * Used by __APPLE__ */ #define SCTP_SO_LOCKED 1 #define SCTP_SO_NOT_LOCKED 0 - /*- * For address locks, do we hold the lock? 
*/ @@ -1012,7 +989,7 @@ extern void getwintimeofday(struct timeval *tv); #define SCTP_SOCKET_OPTION_LIMIT (64 * 1024) #if defined(__Userspace__) -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) #define SCTP_GETTIME_TIMEVAL(x) getwintimeofday(x) #define SCTP_GETPTIME_TIMEVAL(x) getwintimeofday(x) /* this doesn't seem to ever be used.. */ #else @@ -1020,7 +997,6 @@ extern void getwintimeofday(struct timeval *tv); #define SCTP_GETPTIME_TIMEVAL(x) gettimeofday(x, NULL) #endif #endif - #if defined(_KERNEL) #define SCTP_GETTIME_TIMEVAL(x) (getmicrouptime(x)) #define SCTP_GETPTIME_TIMEVAL(x) (microuptime(x)) @@ -1036,11 +1012,11 @@ do { \ } \ } while (0) -#if defined(__FreeBSD__) || defined(__Windows__) || defined(__Userspace__) +#if defined(__FreeBSD__) || defined(_WIN32) || defined(__Userspace__) #define sctp_sowwakeup_locked(inp, so) \ do { \ if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) { \ - SOCKBUF_UNLOCK(&((so)->so_snd)); \ + SOCKBUF_UNLOCK(&((so)->so_snd)); \ inp->sctp_flags |= SCTP_PCB_FLAGS_WAKEOUTPUT; \ } else { \ sowwakeup_locked(so); \ @@ -1050,7 +1026,7 @@ do { \ #define sctp_sowwakeup_locked(inp, so) \ do { \ if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) { \ - SOCKBUF_UNLOCK(&((so)->so_snd)); \ + SOCKBUF_UNLOCK(&((so)->so_snd)); \ inp->sctp_flags |= SCTP_PCB_FLAGS_WAKEOUTPUT; \ } else { \ sowwakeup(so); \ @@ -1067,12 +1043,12 @@ do { \ } \ } while (0) -#if defined(__FreeBSD__) || defined(__Windows__) || defined(__Userspace__) +#if defined(__FreeBSD__) || defined(_WIN32) || defined(__Userspace__) #define sctp_sorwakeup_locked(inp, so) \ do { \ if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) { \ inp->sctp_flags |= SCTP_PCB_FLAGS_WAKEINPUT; \ - SOCKBUF_UNLOCK(&((so)->so_rcv)); \ + SOCKBUF_UNLOCK(&((so)->so_rcv)); \ } else { \ sorwakeup_locked(so); \ } \ @@ -1083,7 +1059,7 @@ do { \ do { \ if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) { \ inp->sctp_flags |= SCTP_PCB_FLAGS_WAKEINPUT; \ - SOCKBUF_UNLOCK(&((so)->so_rcv)); \ + SOCKBUF_UNLOCK(&((so)->so_rcv)); \ } else { \ sorwakeup(so); \ } \ diff --git a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_crc32.c b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_crc32.c index c08392a7a..0b5a06e06 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_crc32.c +++ b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_crc32.c @@ -32,26 +32,22 @@ * THE POSSIBILITY OF SUCH DAMAGE. 
*/ -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) #include -__FBSDID("$FreeBSD: head/sys/netinet/sctp_crc32.c 352361 2019-09-15 18:29:45Z tuexen $"); +__FBSDID("$FreeBSD: head/sys/netinet/sctp_crc32.c 362498 2020-06-22 14:36:14Z tuexen $"); #include "opt_sctp.h" -#if defined(__FreeBSD__) -#include -#endif -#ifdef SCTP -#include -#include -#include -#include -#else #include #include +#include #include +#include #include +#if defined(SCTP) || defined(SCTP_SUPPORT) +#include +#include #endif #else #include @@ -60,7 +56,7 @@ __FBSDID("$FreeBSD: head/sys/netinet/sctp_crc32.c 352361 2019-09-15 18:29:45Z tu #include #endif -#if !defined(__FreeBSD__) +#if !(defined(__FreeBSD__) && !defined(__Userspace__)) /** * * Routine Description: @@ -799,8 +795,11 @@ sctp_calculate_cksum(struct mbuf *m, uint32_t offset) return (base); } -#if defined(__FreeBSD__) -#ifdef SCTP +#if defined(__FreeBSD__) && !defined(__Userspace__) +#if defined(SCTP) || defined(SCTP_SUPPORT) + +VNET_DEFINE(struct sctp_base_info, system_base_info); + /* * Compute and insert the SCTP checksum in network byte order for a given * mbuf chain m which contains an SCTP packet starting at offset. diff --git a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_crc32.h b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_crc32.h index ac20c1034..5e76ff21b 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_crc32.h +++ b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_crc32.h @@ -32,9 +32,9 @@ * THE POSSIBILITY OF SUCH DAMAGE. */ -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) #include -__FBSDID("$FreeBSD: head/sys/netinet/sctp_crc32.h 327200 2017-12-26 12:35:02Z tuexen $"); +__FBSDID("$FreeBSD: head/sys/netinet/sctp_crc32.h 362338 2020-06-18 19:32:34Z markj $"); #endif #ifndef _NETINET_SCTP_CRC32_H_ @@ -42,8 +42,8 @@ __FBSDID("$FreeBSD: head/sys/netinet/sctp_crc32.h 327200 2017-12-26 12:35:02Z tu #if defined(_KERNEL) uint32_t sctp_calculate_cksum(struct mbuf *, uint32_t); -#if defined(__FreeBSD__) -#ifdef SCTP +#if defined(__FreeBSD__) && !defined(__Userspace__) +#if defined(SCTP) || defined(SCTP_SUPPORT) void sctp_delayed_cksum(struct mbuf *, uint32_t offset); #endif #endif diff --git a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_header.h b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_header.h index 6035d9cbd..9226a9ef9 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_header.h +++ b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_header.h @@ -32,24 +32,24 @@ * THE POSSIBILITY OF SUCH DAMAGE. 
*/ -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) #include -__FBSDID("$FreeBSD: head/sys/netinet/sctp_header.h 309682 2016-12-07 19:30:59Z tuexen $"); +__FBSDID("$FreeBSD: head/sys/netinet/sctp_header.h 366114 2020-09-24 12:26:06Z tuexen $"); #endif #ifndef _NETINET_SCTP_HEADER_H_ #define _NETINET_SCTP_HEADER_H_ -#if defined(__Windows__) && !defined(__Userspace_os_Windows) +#if defined(_WIN32) && !defined(__Userspace__) #include #endif -#if !defined(__Userspace_os_Windows) +#if !defined(_WIN32) #include #endif #include #include -#if !defined(__Userspace_os_Windows) +#if !defined(_WIN32) #define SCTP_PACKED __attribute__((packed)) #else #pragma pack (push, 1) @@ -66,7 +66,6 @@ struct sctp_ipv4addr_param { #define SCTP_V6_ADDR_BYTES 16 - struct sctp_ipv6addr_param { struct sctp_paramhdr ph;/* type=SCTP_IPV6_PARAM_TYPE, len=20 */ uint8_t addr[SCTP_V6_ADDR_BYTES]; /* IPV6 address */ @@ -110,14 +109,12 @@ struct sctp_heartbeat_info_param { char address[SCTP_ADDRMAX]; } SCTP_PACKED; - /* draft-ietf-tsvwg-prsctp */ /* PR-SCTP supported parameter */ struct sctp_prsctp_supported_param { struct sctp_paramhdr ph; } SCTP_PACKED; - /* draft-ietf-tsvwg-addip-sctp */ struct sctp_asconf_paramhdr { /* an ASCONF "parameter" */ struct sctp_paramhdr ph;/* a SCTP parameter header */ @@ -129,14 +126,12 @@ struct sctp_asconf_addr_param { /* an ASCONF address parameter */ struct sctp_ipv6addr_param addrp; /* max storage size */ } SCTP_PACKED; - struct sctp_asconf_tag_param { /* an ASCONF NAT-Vtag parameter */ struct sctp_asconf_paramhdr aph; /* asconf "parameter" */ - uint32_t local_vtag; - uint32_t remote_vtag; + uint32_t local_vtag; + uint32_t remote_vtag; } SCTP_PACKED; - struct sctp_asconf_addrv4_param { /* an ASCONF address (v4) parameter */ struct sctp_asconf_paramhdr aph; /* asconf "parameter" */ struct sctp_ipv4addr_param addrp; /* max storage size */ @@ -149,7 +144,6 @@ struct sctp_supported_chunk_types_param { uint8_t chunk_types[]; } SCTP_PACKED; - /* * Structures for DATA chunks */ @@ -260,7 +254,6 @@ struct sctp_init_msg { #define sctp_init_ack_chunk sctp_init_chunk #define sctp_init_ack_msg sctp_init_msg - /* Selective Ack (SACK) */ struct sctp_gap_ack_block { uint16_t start; /* Gap Ack block start */ @@ -297,7 +290,6 @@ struct sctp_nr_sack_chunk { struct sctp_nr_sack nr_sack; } SCTP_PACKED; - /* Heartbeat Request (HEARTBEAT) */ struct sctp_heartbeat { struct sctp_heartbeat_info_param hb_info; @@ -312,7 +304,6 @@ struct sctp_heartbeat_chunk { #define sctp_heartbeat_ack sctp_heartbeat #define sctp_heartbeat_ack_chunk sctp_heartbeat_chunk - /* Abort Asssociation (ABORT) */ struct sctp_abort_chunk { struct sctp_chunkhdr ch; @@ -324,27 +315,23 @@ struct sctp_abort_msg { struct sctp_abort_chunk msg; } SCTP_PACKED; - /* Shutdown Association (SHUTDOWN) */ struct sctp_shutdown_chunk { struct sctp_chunkhdr ch; uint32_t cumulative_tsn_ack; } SCTP_PACKED; - /* Shutdown Acknowledgment (SHUTDOWN ACK) */ struct sctp_shutdown_ack_chunk { struct sctp_chunkhdr ch; } SCTP_PACKED; - /* Operation Error (ERROR) */ struct sctp_error_chunk { struct sctp_chunkhdr ch; /* optional error causes follow */ } SCTP_PACKED; - /* Cookie Echo (COOKIE ECHO) */ struct sctp_cookie_echo_chunk { struct sctp_chunkhdr ch; @@ -436,7 +423,6 @@ struct sctp_chunk_desc { uint32_t tsn_ifany; } SCTP_PACKED; - struct sctp_pktdrop_chunk { struct sctp_chunkhdr ch; uint32_t bottle_bw; @@ -487,10 +473,10 @@ struct sctp_stream_reset_response_tsn { } SCTP_PACKED; struct sctp_stream_reset_add_strm { - struct sctp_paramhdr ph; - 
uint32_t request_seq; - uint16_t number_of_streams; - uint16_t reserved; + struct sctp_paramhdr ph; + uint32_t request_seq; + uint16_t number_of_streams; + uint16_t reserved; } SCTP_PACKED; #define SCTP_STREAM_RESET_RESULT_NOTHING_TO_DO 0x00000000 /* XXX: unused */ @@ -563,48 +549,46 @@ struct sctp_auth_chunk { #ifndef SCTP_MAX_OVERHEAD #ifdef INET6 #define SCTP_MAX_OVERHEAD (sizeof(struct sctp_data_chunk) + \ - sizeof(struct sctphdr) + \ - sizeof(struct sctp_ecne_chunk) + \ - sizeof(struct sctp_sack_chunk) + \ - sizeof(struct ip6_hdr)) + sizeof(struct sctphdr) + \ + sizeof(struct sctp_ecne_chunk) + \ + sizeof(struct sctp_sack_chunk) + \ + sizeof(struct ip6_hdr)) #define SCTP_MED_OVERHEAD (sizeof(struct sctp_data_chunk) + \ - sizeof(struct sctphdr) + \ - sizeof(struct ip6_hdr)) - + sizeof(struct sctphdr) + \ + sizeof(struct ip6_hdr)) #define SCTP_MIN_OVERHEAD (sizeof(struct ip6_hdr) + \ - sizeof(struct sctphdr)) + sizeof(struct sctphdr)) #else #define SCTP_MAX_OVERHEAD (sizeof(struct sctp_data_chunk) + \ - sizeof(struct sctphdr) + \ - sizeof(struct sctp_ecne_chunk) + \ - sizeof(struct sctp_sack_chunk) + \ - sizeof(struct ip)) + sizeof(struct sctphdr) + \ + sizeof(struct sctp_ecne_chunk) + \ + sizeof(struct sctp_sack_chunk) + \ + sizeof(struct ip)) #define SCTP_MED_OVERHEAD (sizeof(struct sctp_data_chunk) + \ - sizeof(struct sctphdr) + \ - sizeof(struct ip)) - + sizeof(struct sctphdr) + \ + sizeof(struct ip)) #define SCTP_MIN_OVERHEAD (sizeof(struct ip) + \ - sizeof(struct sctphdr)) + sizeof(struct sctphdr)) #endif /* INET6 */ #endif /* !SCTP_MAX_OVERHEAD */ #define SCTP_MED_V4_OVERHEAD (sizeof(struct sctp_data_chunk) + \ - sizeof(struct sctphdr) + \ - sizeof(struct ip)) + sizeof(struct sctphdr) + \ + sizeof(struct ip)) #define SCTP_MIN_V4_OVERHEAD (sizeof(struct ip) + \ - sizeof(struct sctphdr)) + sizeof(struct sctphdr)) -#if defined(__Windows__) +#if defined(_WIN32) && !defined(__Userspace__) #include #endif -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) && defined(__Userspace__) #pragma pack(pop) #endif #undef SCTP_PACKED diff --git a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_indata.c b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_indata.c index 8b332355a..64e4d0b61 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_indata.c +++ b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_indata.c @@ -32,13 +32,13 @@ * THE POSSIBILITY OF SUCH DAMAGE. */ -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) #include -__FBSDID("$FreeBSD: head/sys/netinet/sctp_indata.c 353145 2019-10-06 08:47:10Z tuexen $"); +__FBSDID("$FreeBSD$"); #endif #include -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) #include #endif #include @@ -55,7 +55,7 @@ __FBSDID("$FreeBSD: head/sys/netinet/sctp_indata.c 353145 2019-10-06 08:47:10Z t #include #include #include -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) #include #endif /* @@ -72,8 +72,7 @@ sctp_add_chk_to_control(struct sctp_queued_to_read *control, struct sctp_stream_in *strm, struct sctp_tcb *stcb, struct sctp_association *asoc, - struct sctp_tmit_chunk *chk, int lock_held); - + struct sctp_tmit_chunk *chk, int hold_rlock); void sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc) @@ -134,8 +133,6 @@ sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc) return (calc); } - - /* * Build out our readq entry based on the incoming packet. 
*/ @@ -169,6 +166,9 @@ sctp_build_readq_entry(struct sctp_tcb *stcb, read_queue_e->data = dm; read_queue_e->stcb = stcb; read_queue_e->port_from = stcb->rport; + if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { + read_queue_e->do_not_ref_stcb = 1; + } failed_build: return (read_queue_e); } @@ -180,7 +180,7 @@ sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo) struct sctp_sndrcvinfo *outinfo; struct sctp_rcvinfo *rcvinfo; struct sctp_nxtinfo *nxtinfo; -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) WSACMSGHDR *cmh; #else struct cmsghdr *cmh; @@ -229,7 +229,7 @@ sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo) SCTP_BUF_LEN(ret) = 0; /* We need a CMSG header followed by the struct */ -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) cmh = mtod(ret, WSACMSGHDR *); #else cmh = mtod(ret, struct cmsghdr *); @@ -252,7 +252,7 @@ sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo) rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn; rcvinfo->rcv_context = sinfo->sinfo_context; rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id; -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) cmh = (WSACMSGHDR *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo))); #else cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo))); @@ -278,7 +278,7 @@ sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo) nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid; nxtinfo->nxt_length = seinfo->serinfo_next_length; nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid; -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) cmh = (WSACMSGHDR *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo))); #else cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo))); @@ -303,19 +303,18 @@ sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo) return (ret); } - static void sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn) { - uint32_t gap, i, cumackp1; - int fnd = 0; - int in_r=0, in_nr=0; + uint32_t gap, i; + int in_r, in_nr; + if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) { return; } - cumackp1 = asoc->cumulative_tsn + 1; - if (SCTP_TSN_GT(cumackp1, tsn)) { - /* this tsn is behind the cum ack and thus we don't + if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) { + /* + * This tsn is behind the cum ack and thus we don't * need to worry about it being moved from one to the other. 
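The rewritten sctp_mark_non_revokable() below moves a received TSN's bit out of the ordinary mapping array and into the non-renegable (nr) mapping array, asserting that the TSN was present in at least one of them, and rescans downward for a new highest TSN when the bit it cleared was the current maximum. A toy model of the bit move, using simple illustrative helpers instead of the real SCTP_IS_TSN_PRESENT()/SCTP_SET_TSN_PRESENT()/SCTP_UNSET_TSN_PRESENT() macros:

    #include <stdint.h>

    /* Illustrative bitmap helpers; 'gap' is the offset of a TSN from the
     * mapping array's base TSN, one bit per TSN. */
    static int  bit_get(const uint8_t *map, uint32_t gap) { return (map[gap >> 3] >> (gap & 7)) & 1; }
    static void bit_set(uint8_t *map, uint32_t gap)       { map[gap >> 3] |= (uint8_t)(1 << (gap & 7)); }
    static void bit_clr(uint8_t *map, uint32_t gap)       { map[gap >> 3] &= (uint8_t)~(1 << (gap & 7)); }

    /* Mark one gap non-revokable: set it in the nr map and clear it from the
     * ordinary map, the same net effect as the reworked function (minus the
     * highest-TSN bookkeeping). */
    static void
    mark_non_revokable(uint8_t *map, uint8_t *nr_map, uint32_t gap)
    {
        if (!bit_get(nr_map, gap))
            bit_set(nr_map, gap);
        if (bit_get(map, gap))
            bit_clr(map, gap);
    }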
*/ return; @@ -323,33 +322,27 @@ sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn) SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn); in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap); in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap); - if ((in_r == 0) && (in_nr == 0)) { -#ifdef INVARIANTS - panic("Things are really messed up now"); -#else - SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn); - sctp_print_mapping_array(asoc); -#endif - } - if (in_nr == 0) + KASSERT(in_r || in_nr, ("%s: Things are really messed up now", __func__)); + if (!in_nr) { SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); - if (in_r) - SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap); - if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { - asoc->highest_tsn_inside_nr_map = tsn; - } - if (tsn == asoc->highest_tsn_inside_map) { - /* We must back down to see what the new highest is */ - for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) { - SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn); - if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) { - asoc->highest_tsn_inside_map = i; - fnd = 1; - break; - } + if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { + asoc->highest_tsn_inside_nr_map = tsn; } - if (!fnd) { - asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1; + } + if (in_r) { + SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap); + if (tsn == asoc->highest_tsn_inside_map) { + /* We must back down to see what the new highest is. */ + for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) { + SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn); + if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) { + asoc->highest_tsn_inside_map = i; + break; + } + } + if (!SCTP_TSN_GE(i, asoc->mapping_array_base_tsn)) { + asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1; + } } } } @@ -404,7 +397,7 @@ sctp_place_control_in_stream(struct sctp_stream_in *strm, if (unordered) { control->on_strm_q = SCTP_ON_UNORDERED; } else { - control->on_strm_q = SCTP_ON_ORDERED ; + control->on_strm_q = SCTP_ON_ORDERED; } break; } else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) { @@ -425,9 +418,9 @@ sctp_place_control_in_stream(struct sctp_stream_in *strm, } TAILQ_INSERT_AFTER(q, at, control, next_instrm); if (unordered) { - control->on_strm_q = SCTP_ON_UNORDERED ; + control->on_strm_q = SCTP_ON_UNORDERED; } else { - control->on_strm_q = SCTP_ON_ORDERED ; + control->on_strm_q = SCTP_ON_ORDERED; } break; } @@ -447,36 +440,36 @@ sctp_abort_in_reasm(struct sctp_tcb *stcb, struct mbuf *oper; if (stcb->asoc.idata_supported) { - snprintf(msg, sizeof(msg), - "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x", - opspot, - control->fsn_included, - chk->rec.data.tsn, - chk->rec.data.sid, - chk->rec.data.fsn, chk->rec.data.mid); + SCTP_SNPRINTF(msg, sizeof(msg), + "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x", + opspot, + control->fsn_included, + chk->rec.data.tsn, + chk->rec.data.sid, + chk->rec.data.fsn, chk->rec.data.mid); } else { - snprintf(msg, sizeof(msg), - "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x", - opspot, - control->fsn_included, - chk->rec.data.tsn, - chk->rec.data.sid, - chk->rec.data.fsn, - (uint16_t)chk->rec.data.mid); + SCTP_SNPRINTF(msg, sizeof(msg), + "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x", + opspot, + control->fsn_included, + chk->rec.data.tsn, + chk->rec.data.sid, + chk->rec.data.fsn, + (uint16_t)chk->rec.data.mid); } oper = 
sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); sctp_m_freem(chk->data); chk->data = NULL; sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1; - sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED); + sctp_abort_an_association(stcb->sctp_ep, stcb, oper, false, SCTP_SO_NOT_LOCKED); *abort_flag = 1; } static void sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control) { - /* + /* * The control could not be placed and must be cleaned. */ struct sctp_tmit_chunk *chk, *nchk; @@ -545,29 +538,28 @@ sctp_queue_data_to_stream(struct sctp_tcb *stcb, */ TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm); if (asoc->idata_supported) { - snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x", - strm->last_mid_delivered, control->sinfo_tsn, - control->sinfo_stream, control->mid); + SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x", + strm->last_mid_delivered, control->sinfo_tsn, + control->sinfo_stream, control->mid); } else { - snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x", - (uint16_t)strm->last_mid_delivered, - control->sinfo_tsn, - control->sinfo_stream, - (uint16_t)control->mid); + SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x", + (uint16_t)strm->last_mid_delivered, + control->sinfo_tsn, + control->sinfo_stream, + (uint16_t)control->mid); } op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2; - sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); + sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); *abort_flag = 1; return; - } queue_needed = 1; asoc->size_on_all_streams += control->length; sctp_ucount_incr(asoc->cnt_on_all_streams); nxt_todel = strm->last_mid_delivered + 1; if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) { -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) struct socket *so; so = SCTP_INP_SO(stcb->sctp_ep); @@ -650,7 +642,7 @@ sctp_queue_data_to_stream(struct sctp_tcb *stcb, } break; } -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_SOCKET_UNLOCK(so, 1); #endif } @@ -660,19 +652,17 @@ sctp_queue_data_to_stream(struct sctp_tcb *stcb, * to put it on the queue. 
*/ if (sctp_place_control_in_stream(strm, asoc, control)) { - snprintf(msg, sizeof(msg), - "Queue to str MID: %u duplicate", - control->mid); + SCTP_SNPRINTF(msg, sizeof(msg), + "Queue to str MID: %u duplicate", control->mid); sctp_clean_up_control(stcb, control); op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3; - sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); + sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); *abort_flag = 1; } } } - static void sctp_setup_tail_pointer(struct sctp_queued_to_read *control) { @@ -770,7 +760,7 @@ sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, ui } } -static void +static void sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control) { memset(nc, 0, sizeof(struct sctp_queued_to_read)); @@ -790,15 +780,16 @@ sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queu atomic_add_int(&nc->whoFrom->ref_count, 1); nc->stcb = control->stcb; nc->port_from = control->port_from; + nc->do_not_ref_stcb = control->do_not_ref_stcb; } -static void +static void sctp_reset_a_control(struct sctp_queued_to_read *control, struct sctp_inpcb *inp, uint32_t tsn) { control->fsn_included = tsn; if (control->on_read_q) { - /* + /* * We have to purge it from there, * hopefully this will work :-) */ @@ -847,14 +838,14 @@ restart: } memset(nc, 0, sizeof(struct sctp_queued_to_read)); TAILQ_REMOVE(&control->reasm, chk, sctp_next); - sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD); + sctp_add_chk_to_control(control, strm, stcb, asoc, chk, inp_read_lock_held); fsn++; cnt_added++; chk = NULL; if (control->end_added) { /* We are done */ if (!TAILQ_EMPTY(&control->reasm)) { - /* + /* * Ok we have to move anything left on * the control queue to a new control. */ @@ -936,7 +927,7 @@ restart: } if (cnt_added && strm->pd_api_started) { #if defined(__Userspace__) - sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, SCTP_READ_LOCK_NOT_HELD); + sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, inp_read_lock_held); #endif sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); } @@ -973,7 +964,7 @@ sctp_inject_old_unordered_data(struct sctp_tcb *stcb, chk->rec.data.fsn); at = TAILQ_FIRST(&control->reasm); if (at && SCTP_TSN_GT(chk->rec.data.fsn, at->rec.data.fsn)) { - /* + /* * The first chunk in the reassembly is * a smaller TSN than this one, even though * this has a first, it must be from a subsequent @@ -997,7 +988,7 @@ sctp_inject_old_unordered_data(struct sctp_tcb *stcb, } if ((chk->rec.data.fsn == control->fsn_included) || (control->pdapi_started)) { - /* + /* * Ok this should not happen, if it does * we started the pd-api on the higher TSN (since * the equals part is a TSN failure it must be that). @@ -1066,7 +1057,7 @@ place_chunk: TAILQ_INSERT_BEFORE(at, chk, sctp_next); break; } else if (at->rec.data.fsn == chk->rec.data.fsn) { - /* + /* * They sent a duplicate fsn number. This * really should not happen since the FSN is * a TSN and it should have been dropped earlier. 
@@ -1076,7 +1067,6 @@ place_chunk: SCTP_FROM_SCTP_INDATA + SCTP_LOC_5); return; } - } if (inserted == 0) { /* Its at the end */ @@ -1131,13 +1121,23 @@ sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc, /* We just put the last bit on */ if (control->on_strm_q) { #ifdef INVARIANTS - if (control->on_strm_q != SCTP_ON_UNORDERED ) { + if (control->on_strm_q != SCTP_ON_UNORDERED) { panic("Huh control: %p on_q: %d -- not unordered?", control, control->on_strm_q); } #endif SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs); TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm); + if (asoc->size_on_all_streams >= control->length) { + asoc->size_on_all_streams -= control->length; + } else { +#ifdef INVARIANTS + panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); +#else + asoc->size_on_all_streams = 0; +#endif + } + sctp_ucount_decr(asoc->cnt_on_all_streams); control->on_strm_q = 0; } if (control->on_read_q == 0) { @@ -1185,7 +1185,7 @@ done_un: if (control->end_added) { if (control->on_strm_q) { #ifdef INVARIANTS - if (control->on_strm_q != SCTP_ON_ORDERED ) { + if (control->on_strm_q != SCTP_ON_ORDERED) { panic("Huh control: %p on_q: %d -- not ordered?", control, control->on_strm_q); } @@ -1238,7 +1238,7 @@ deliver_more: /* We are done with it afterwards */ if (control->on_strm_q) { #ifdef INVARIANTS - if (control->on_strm_q != SCTP_ON_ORDERED ) { + if (control->on_strm_q != SCTP_ON_ORDERED) { panic("Huh control: %p on_q: %d -- not ordered?", control, control->on_strm_q); } @@ -1300,7 +1300,6 @@ out: return (ret); } - uint32_t sctp_add_chk_to_control(struct sctp_queued_to_read *control, struct sctp_stream_in *strm, @@ -1406,7 +1405,7 @@ sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc, } /* Must be added to the stream-in queue */ if (created_control) { - if (unordered == 0) { + if ((unordered == 0) || (asoc->idata_supported)) { sctp_ucount_incr(asoc->cnt_on_all_streams); } if (sctp_place_control_in_stream(strm, asoc, control)) { @@ -1497,7 +1496,7 @@ sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc, } } if (asoc->idata_supported || control->first_frag_seen) { - /* + /* * For IDATA we always check since we know that * the first fragment is 0. For old DATA we have * to receive the first before we know the first FSN @@ -1523,7 +1522,7 @@ sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc, return; } if (asoc->idata_supported || control->first_frag_seen) { - /* + /* * For IDATA we always check since we know that * the first fragment is 0. For old DATA we have * to receive the first before we know the first FSN @@ -1555,7 +1554,7 @@ sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc, } /* * If we reach here, we need to place the - * new chunk in the reassembly for this + * new chunk in the reassembly for this * control. */ SCTPDBG(SCTP_DEBUG_XXX, @@ -1563,6 +1562,15 @@ sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc, chk->rec.data.fsn); TAILQ_FOREACH(at, &control->reasm, sctp_next) { if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) { + if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { + /* Last not at the end? huh? 
*/ + SCTPDBG(SCTP_DEBUG_XXX, + "Last fragment not last in list: -- abort\n"); + sctp_abort_in_reasm(stcb, control, + chk, abort_flag, + SCTP_FROM_SCTP_INDATA + SCTP_LOC_14); + return; + } /* * This one in queue is bigger than the new one, insert * the new one before at. @@ -1589,7 +1597,7 @@ sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc, at->rec.data.fsn); sctp_abort_in_reasm(stcb, control, chk, abort_flag, - SCTP_FROM_SCTP_INDATA + SCTP_LOC_14); + SCTP_FROM_SCTP_INDATA + SCTP_LOC_15); return; } } @@ -1694,6 +1702,7 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc, int *break_flag, int last_chunk, uint8_t chk_type) { struct sctp_tmit_chunk *chk = NULL; /* make gcc happy */ + struct sctp_stream_in *strm; uint32_t tsn, fsn, gap, mid; struct mbuf *dmbuf; int the_len; @@ -1745,8 +1754,8 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc, * empty data chunk. */ op_err = sctp_generate_no_user_data_cause(tsn); - stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14; - sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); + stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16; + sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); *abort_flag = 1; return (0); } @@ -1812,7 +1821,7 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc, * receiver. Send peer an ABORT! */ op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, ""); - sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); + sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); *abort_flag = 1; return (0); } @@ -1863,14 +1872,13 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc, if ((chk_type == SCTP_IDATA) && ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) && (fsn == 0)) { - /* - * The first *must* be fsn 0, and other + /* + * The first *must* be fsn 0, and other * (middle/end) pieces can *not* be fsn 0. * XXX: This can happen in case of a wrap around. * Ignore is for now. */ - snprintf(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x", - mid, chk_flags); + SCTP_SNPRINTF(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x", mid, chk_flags); goto err_out; } control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported); @@ -1881,24 +1889,26 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc, if (control != NULL) { /* We found something, does it belong? 
*/ if (ordered && (mid != control->mid)) { - snprintf(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid); + SCTP_SNPRINTF(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid); err_out: op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); - stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15; - sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); + stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17; + sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); *abort_flag = 1; return (0); } if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) { /* We can't have a switched order with an unordered chunk */ - snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)", - tsn); + SCTP_SNPRINTF(msg, sizeof(msg), + "All fragments of a user message must be ordered or unordered (TSN=%8.8x)", + tsn); goto err_out; } if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) { /* We can't have a switched unordered with a ordered chunk */ - snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)", - tsn); + SCTP_SNPRINTF(msg, sizeof(msg), + "All fragments of a user message must be ordered or unordered (TSN=%8.8x)", + tsn); goto err_out; } } @@ -1912,12 +1922,14 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc, if (ordered || asoc->idata_supported) { SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n", chk_flags, mid); - snprintf(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid); + SCTP_SNPRINTF(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid); goto err_out; } else { if ((tsn == control->fsn_included + 1) && (control->end_added == 0)) { - snprintf(msg, sizeof(msg), "Illegal message sequence, missing end for MID: %8.8x", control->fsn_included); + SCTP_SNPRINTF(msg, sizeof(msg), + "Illegal message sequence, missing end for MID: %8.8x", + control->fsn_included); goto err_out; } else { control = NULL; @@ -1936,7 +1948,7 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc, */ if (stcb->sctp_socket->so_rcv.sb_cc) { /* some to read, wake-up */ -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) struct socket *so; so = SCTP_INP_SO(stcb->sctp_ep); @@ -1952,7 +1964,7 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc, } #endif sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_SOCKET_UNLOCK(so, 1); #endif } @@ -2014,21 +2026,21 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc, mid, asoc->strmin[sid].last_mid_delivered); if (asoc->idata_supported) { - snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x", - asoc->strmin[sid].last_mid_delivered, - tsn, - sid, - mid); + SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x", + asoc->strmin[sid].last_mid_delivered, + tsn, + sid, + mid); } else { - snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x", - (uint16_t)asoc->strmin[sid].last_mid_delivered, - tsn, - sid, - (uint16_t)mid); + SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x", + (uint16_t)asoc->strmin[sid].last_mid_delivered, + tsn, 
+ sid, + (uint16_t)mid); } op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); - stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16; - sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); + stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18; + sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); *abort_flag = 1; return (0); } @@ -2224,7 +2236,6 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc, unsigned char inserted = 0; TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) { if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) { - continue; } else { /* found it */ @@ -2329,12 +2340,13 @@ finish_express_del: /* All can be removed */ TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) { TAILQ_REMOVE(&asoc->pending_reply_queue, control, next); + strm = &asoc->strmin[control->sinfo_stream]; sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check); if (*abort_flag) { return (0); } if (need_reasm_check) { - (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD); + (void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD); need_reasm_check = 0; } } @@ -2349,12 +2361,13 @@ finish_express_del: * control->sinfo_tsn > liste->tsn */ TAILQ_REMOVE(&asoc->pending_reply_queue, control, next); + strm = &asoc->strmin[control->sinfo_stream]; sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check); if (*abort_flag) { return (0); } if (need_reasm_check) { - (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD); + (void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD); need_reasm_check = 0; } } @@ -2398,7 +2411,6 @@ static const int8_t sctp_map_lookup_tab[256] = { 0, 1, 0, 2, 0, 1, 0, 8 }; - void sctp_slide_mapping_arrays(struct sctp_tcb *stcb) { @@ -2534,7 +2546,6 @@ sctp_slide_mapping_arrays(struct sctp_tcb *stcb) * we will be able to slide it forward. Really I * don't think this should happen :-0 */ - if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { sctp_log_map((uint32_t) distance, (uint32_t) slide_from, (uint32_t) asoc->mapping_array_size, @@ -2546,7 +2557,6 @@ sctp_slide_mapping_arrays(struct sctp_tcb *stcb) for (ii = 0; ii < distance; ii++) { asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii]; asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii]; - } for (ii = distance; ii < asoc->mapping_array_size; ii++) { asoc->mapping_array[ii] = 0; @@ -2598,7 +2608,7 @@ sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap) if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, - SCTP_FROM_SCTP_INDATA + SCTP_LOC_17); + SCTP_FROM_SCTP_INDATA + SCTP_LOC_19); } sctp_send_shutdown(stcb, ((stcb->asoc.alternate) ? 
stcb->asoc.alternate : stcb->asoc.primary_destination)); @@ -2618,20 +2628,16 @@ sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap) (stcb->asoc.numduptsns) || /* we have dup's */ (is_a_gap) || /* is still a gap */ (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */ - (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */ - ) { - + (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)) { /* hit limit of pkts */ if ((stcb->asoc.sctp_cmt_on_off > 0) && (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) && (stcb->asoc.send_sack == 0) && (stcb->asoc.numduptsns == 0) && (stcb->asoc.delayed_ack) && (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) { - /* * CMT DAC algorithm: With CMT, * delay acks even in the face of - * reordering. Therefore, if acks * that do not have to be sent * because of the above reasons, @@ -2649,7 +2655,8 @@ sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap) * first packet OR there are gaps or * duplicates. */ - (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer); + sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, + SCTP_FROM_SCTP_INDATA + SCTP_LOC_20); sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); } } else { @@ -2695,14 +2702,12 @@ sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length, */ asoc->last_data_chunk_from = net; -#ifndef __Panda__ /*- * Now before we proceed we must figure out if this is a wasted * cluster... i.e. it is a small packet sent in and yet the driver * underneath allocated a full cluster for it. If so we must copy it * to a smaller mbuf and free up the cluster mbuf. This will help - * with cluster starvation. Note for __Panda__ we don't do this - * since it has clusters all the way down to 64 bytes. + * with cluster starvation. */ if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) { /* we only handle mbufs that are singletons.. 
not chains */ @@ -2724,7 +2729,6 @@ sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length, m = *mm; } } -#endif /* get pointer to the first chunk header */ ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, sizeof(struct sctp_chunkhdr), @@ -2751,10 +2755,10 @@ sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length, struct mbuf *op_err; char msg[SCTP_DIAG_INFO_LEN]; - snprintf(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated"); + SCTP_SNPRINTF(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated"); op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); - stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18; - sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); + stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21; + sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED); return (2); } if ((asoc->idata_supported == 0) && @@ -2762,10 +2766,10 @@ sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length, struct mbuf *op_err; char msg[SCTP_DIAG_INFO_LEN]; - snprintf(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated"); + SCTP_SNPRINTF(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated"); op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); - stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19; - sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); + stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_22; + sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED); return (2); } if ((ch->chunk_type == SCTP_DATA) || @@ -2785,12 +2789,12 @@ sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length, struct mbuf *op_err; char msg[SCTP_DIAG_INFO_LEN]; - snprintf(msg, sizeof(msg), "%s chunk of length %u", - ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA", - chk_length); + SCTP_SNPRINTF(msg, sizeof(msg), "%s chunk of length %u", + ch->chunk_type == SCTP_DATA ? 
"DATA" : "I-DATA", + chk_length); op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); - stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20; - sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); + stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_23; + sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED); return (2); } #ifdef SCTP_AUDITING_ENABLED @@ -2801,7 +2805,7 @@ sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length, } else { last_chunk = 0; } - if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, + if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, chk_length, net, high_tsn, &abort_flag, &break_flag, last_chunk, ch->chunk_type)) { num_chunks++; @@ -2854,10 +2858,10 @@ sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length, struct mbuf *op_err; char msg[SCTP_DIAG_INFO_LEN]; - snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x", - ch->chunk_type); + SCTP_SNPRINTF(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x", + ch->chunk_type); op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); - sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); + sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED); return (2); } default: @@ -2873,11 +2877,10 @@ sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length, struct mbuf *op_err; char msg[SCTP_DIAG_INFO_LEN]; - snprintf(msg, sizeof(msg), "Chunk of length %u", - chk_length); + SCTP_SNPRINTF(msg, sizeof(msg), "Chunk of length %u", chk_length); op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); - stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20; - sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); + stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24; + sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED); return (2); } if (ch->chunk_type & 0x40) { @@ -3116,7 +3119,6 @@ sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1 tp1->do_rtt = 0; } } - } if (tp1->sent <= SCTP_DATAGRAM_RESEND) { if (SCTP_TSN_GT(tp1->rec.data.tsn, @@ -3191,7 +3193,6 @@ sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1 return (wake_him); /* Return value only used for nr-sack */ } - static int sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc, uint32_t last_tsn, uint32_t *biggest_tsn_acked, @@ -3312,7 +3313,6 @@ sctp_check_for_revoked(struct sctp_tcb *stcb, } } - static void sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc, uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved) @@ -3368,7 +3368,7 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc, if (stcb->asoc.prsctp_supported) { if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) { /* Is it expired? 
*/ -#ifndef __FreeBSD__ +#if !(defined(__FreeBSD__) && !defined(__Userspace__)) if (timercmp(&now, &tp1->rec.data.timetodrop, >)) { #else if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) { @@ -3381,7 +3381,6 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc, continue; } } - } if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) && !(accum_moved && asoc->fast_retran_loss_recovery)) { @@ -3491,7 +3490,6 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc, (1) #endif ) { - if (SCTP_TSN_GE(biggest_tsn_newly_acked, tp1->rec.data.fast_retran_tsn)) { /* @@ -3678,9 +3676,7 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc, tp1->whoTo->find_pseudo_cumack = 1; tp1->whoTo->find_rtx_pseudo_cumack = 1; } - } else {/* CMT is OFF */ - #ifdef SCTP_FR_TO_ALTERNATE /* Can we find an alternate? */ alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0); @@ -3796,7 +3792,7 @@ sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb, * Now is this one marked for resend and its time is * now up? */ -#ifndef __FreeBSD__ +#if !(defined(__FreeBSD__) && !defined(__Userspace__)) if (timercmp(&now, &tp1->rec.data.timetodrop, >)) { #else if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) { @@ -3891,11 +3887,10 @@ sctp_fs_audit(struct sctp_association *asoc) return (ret); } - static void sctp_window_probe_recovery(struct sctp_tcb *stcb, - struct sctp_association *asoc, - struct sctp_tmit_chunk *tp1) + struct sctp_association *asoc, + struct sctp_tmit_chunk *tp1) { tp1->window_probe = 0; if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) { @@ -4005,11 +4000,12 @@ sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack, *abort_now = 1; /* XXX */ - snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x", - cumack, send_s); + SCTP_SNPRINTF(msg, sizeof(msg), + "Cum ack %8.8x greater or equal than TSN %8.8x", + cumack, send_s); op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); - stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21; - sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); + stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25; + sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); return; } asoc->this_sack_highest_gap = cumack; @@ -4090,7 +4086,6 @@ sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack, tp1->whoTo->new_pseudo_cumack = 1; tp1->whoTo->find_pseudo_cumack = 1; tp1->whoTo->find_rtx_pseudo_cumack = 1; - if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { /* sa_ignore NO_NULL_CHK */ sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK); @@ -4139,7 +4134,6 @@ sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack, break; } } - } #if defined(__Userspace__) if (stcb->sctp_ep->recv_callback) { @@ -4159,7 +4153,7 @@ sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack, (inp->send_sb_threshold == 0))) { atomic_add_int(&stcb->asoc.refcnt, 1); SCTP_TCB_UNLOCK(stcb); - inp->send_callback(stcb->sctp_socket, sb_free_now); + inp->send_callback(stcb->sctp_socket, sb_free_now, inp->ulp_info); SCTP_TCB_LOCK(stcb); atomic_subtract_int(&stcb->asoc.refcnt, 1); } @@ -4169,7 +4163,7 @@ sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack, /* sa_ignore NO_NULL_CHK */ if (stcb->sctp_socket) { #endif -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && 
!defined(__Userspace__) struct socket *so; #endif @@ -4178,7 +4172,7 @@ sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack, /* sa_ignore NO_NULL_CHK */ sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK); } -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) so = SCTP_INP_SO(stcb->sctp_ep); atomic_add_int(&stcb->asoc.refcnt, 1); SCTP_TCB_UNLOCK(stcb); @@ -4192,7 +4186,7 @@ sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack, } #endif sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_SOCKET_UNLOCK(so, 1); #endif } else { @@ -4227,7 +4221,7 @@ sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack, net->dest_state &= ~SCTP_ADDR_PF; sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, - SCTP_FROM_SCTP_INDATA + SCTP_LOC_22); + SCTP_FROM_SCTP_INDATA + SCTP_LOC_26); sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net); /* Done with this net */ @@ -4302,7 +4296,7 @@ again: } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net, - SCTP_FROM_SCTP_INDATA + SCTP_LOC_23); + SCTP_FROM_SCTP_INDATA + SCTP_LOC_27); } } } @@ -4354,8 +4348,8 @@ again: *abort_now = 1; /* XXX */ op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, ""); - stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24; - sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); + stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_28; + sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); return; } if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && @@ -4377,7 +4371,7 @@ again: sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, netp); sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, - stcb->sctp_ep, stcb, netp); + stcb->sctp_ep, stcb, NULL); } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) && (asoc->stream_queue_cnt == 0)) { struct sctp_nets *netp; @@ -4424,10 +4418,15 @@ again: } } } - if (lchk) { + for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) { + if (lchk->whoTo != NULL) { + break; + } + } + if (lchk != NULL) { /* Assure a timer is up */ sctp_timer_start(SCTP_TIMER_TYPE_SEND, - stcb->sctp_ep, stcb, lchk->whoTo); + stcb->sctp_ep, stcb, lchk->whoTo); } } if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) { @@ -4560,11 +4559,12 @@ sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup, hopeless_peer: *abort_now = 1; /* XXX */ - snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x", - cum_ack, send_s); + SCTP_SNPRINTF(msg, sizeof(msg), + "Cum ack %8.8x greater or equal than TSN %8.8x", + cum_ack, send_s); op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); - stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25; - sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); + stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_29; + sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); return; } /**********************/ @@ -4595,7 +4595,7 @@ sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup, /* stop any timers */ TAILQ_FOREACH(net, &asoc->nets, sctp_next) { sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 
- stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26); + stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30); net->partial_bytes_acked = 0; net->flight_size = 0; } @@ -4722,8 +4722,6 @@ sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup, tp1->whoTo->new_pseudo_cumack = 1; tp1->whoTo->find_pseudo_cumack = 1; tp1->whoTo->find_rtx_pseudo_cumack = 1; - - if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { sctp_log_sack(asoc->last_acked_seq, cum_ack, @@ -4761,7 +4759,6 @@ sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup, asoc->this_sack_highest_gap = last_tsn; if ((num_seg > 0) || (num_nr_seg > 0)) { - /* * thisSackHighestGap will increase while handling NEW * segments this_sack_highest_newack will increase while @@ -4795,14 +4792,13 @@ sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup, if (net->new_pseudo_cumack) sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net, - SCTP_FROM_SCTP_INDATA + SCTP_LOC_27); - + SCTP_FROM_SCTP_INDATA + SCTP_LOC_31); } } else { if (accum_moved) { TAILQ_FOREACH(net, &asoc->nets, sctp_next) { sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, - stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28); + stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32); } } } @@ -4881,7 +4877,7 @@ sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup, (inp->send_sb_threshold == 0))) { atomic_add_int(&stcb->asoc.refcnt, 1); SCTP_TCB_UNLOCK(stcb); - inp->send_callback(stcb->sctp_socket, sb_free_now); + inp->send_callback(stcb->sctp_socket, sb_free_now, inp->ulp_info); SCTP_TCB_LOCK(stcb); atomic_subtract_int(&stcb->asoc.refcnt, 1); } @@ -4891,7 +4887,7 @@ sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup, /* sa_ignore NO_NULL_CHK */ if ((wake_him) && (stcb->sctp_socket)) { #endif -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) struct socket *so; #endif @@ -4899,7 +4895,7 @@ sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup, if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK); } -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) so = SCTP_INP_SO(stcb->sctp_ep); atomic_add_int(&stcb->asoc.refcnt, 1); SCTP_TCB_UNLOCK(stcb); @@ -4913,7 +4909,7 @@ sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup, } #endif sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_SOCKET_UNLOCK(so, 1); #endif } else { @@ -5007,7 +5003,7 @@ sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup, net->dest_state &= ~SCTP_ADDR_PF; sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, - SCTP_FROM_SCTP_INDATA + SCTP_LOC_29); + SCTP_FROM_SCTP_INDATA + SCTP_LOC_33); sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net); /* Done with this net */ @@ -5032,7 +5028,7 @@ sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup, /* stop all timers */ sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net, - SCTP_FROM_SCTP_INDATA + SCTP_LOC_30); + SCTP_FROM_SCTP_INDATA + SCTP_LOC_34); net->flight_size = 0; net->partial_bytes_acked = 0; } @@ -5070,8 +5066,8 @@ sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup, *abort_now = 1; /* XXX */ op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, 
""); - stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24; - sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); + stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_35; + sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); return; } if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && @@ -5093,7 +5089,7 @@ sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup, sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, netp); sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, - stcb->sctp_ep, stcb, netp); + stcb->sctp_ep, stcb, NULL); return; } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) && (asoc->stream_queue_cnt == 0)) { @@ -5211,12 +5207,11 @@ again: if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net); - } } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net, - SCTP_FROM_SCTP_INDATA + SCTP_LOC_32); + SCTP_FROM_SCTP_INDATA + SCTP_LOC_36); } } } @@ -5281,7 +5276,12 @@ again: } } } - if (lchk) { + for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) { + if (lchk->whoTo != NULL) { + break; + } + } + if (lchk != NULL) { /* Assure a timer is up */ sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, lchk->whoTo); @@ -5430,7 +5430,6 @@ sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb, control, &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED); - } mid = strmin->last_mid_delivered + 1; } else { @@ -5451,17 +5450,12 @@ sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb, } } - - static void sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb, - struct sctp_association *asoc, - uint16_t stream, uint32_t mid, int ordered, uint32_t cumtsn) + struct sctp_association *asoc, struct sctp_stream_in *strm, + struct sctp_queued_to_read *control, int ordered, uint32_t cumtsn) { - struct sctp_queued_to_read *control; - struct sctp_stream_in *strm; struct sctp_tmit_chunk *chk, *nchk; - int cnt_removed=0; /* * For now large messages held on the stream reasm that are @@ -5471,23 +5465,18 @@ sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb, * delivery function... to see if it can be delivered... But * for now we just dump everything on the queue. 
*/ - strm = &asoc->strmin[stream]; - control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported); - if (control == NULL) { - /* Not found */ - return; - } - if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) { + if (!asoc->idata_supported && !ordered && + control->first_frag_seen && + SCTP_TSN_GT(control->fsn_included, cumtsn)) { return; } TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) { /* Purge hanging chunks */ - if (!asoc->idata_supported && (ordered == 0)) { + if (!asoc->idata_supported && !ordered) { if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) { break; } } - cnt_removed++; TAILQ_REMOVE(&control->reasm, chk, sctp_next); if (asoc->size_on_reasm_queue >= chk->send_size) { asoc->size_on_reasm_queue -= chk->send_size; @@ -5566,10 +5555,10 @@ sctp_handle_forward_tsn(struct sctp_tcb *stcb, * * Assume we get FwdTSN(x): * - * 1) update local cumTSN to x - * 2) try to further advance cumTSN to x + others we have - * 3) examine and update re-ordering queue on pr-in-streams - * 4) clean up re-assembly queue + * 1) update local cumTSN to x + * 2) try to further advance cumTSN to x + others we have + * 3) examine and update re-ordering queue on pr-in-streams + * 4) clean up re-assembly queue * 5) Send a sack to report where we are. */ struct sctp_association *asoc; @@ -5577,7 +5566,7 @@ sctp_handle_forward_tsn(struct sctp_tcb *stcb, unsigned int i, fwd_sz, m_size; uint32_t str_seq; struct sctp_stream_in *strm; - struct sctp_queued_to_read *control, *sv; + struct sctp_queued_to_read *control, *ncontrol, *sv; asoc = &stcb->asoc; if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) { @@ -5611,12 +5600,12 @@ sctp_handle_forward_tsn(struct sctp_tcb *stcb, * give out). This must be an attacker. */ *abort_flag = 1; - snprintf(msg, sizeof(msg), - "New cum ack %8.8x too high, highest TSN %8.8x", - new_cum_tsn, asoc->highest_tsn_inside_map); + SCTP_SNPRINTF(msg, sizeof(msg), + "New cum ack %8.8x too high, highest TSN %8.8x", + new_cum_tsn, asoc->highest_tsn_inside_map); op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); - stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33; - sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); + stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_37; + sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); return; } SCTP_STAT_INCR(sctps_fwdtsn_map_over); @@ -5650,10 +5639,14 @@ sctp_handle_forward_tsn(struct sctp_tcb *stcb, /* This is now done as part of clearing up the stream/seq */ if (asoc->idata_supported == 0) { uint16_t sid; + /* Flush all the un-ordered data based on cum-tsn */ SCTP_INP_READ_LOCK(stcb->sctp_ep); - for (sid = 0 ; sid < asoc->streamincnt; sid++) { - sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, new_cum_tsn); + for (sid = 0; sid < asoc->streamincnt; sid++) { + strm = &asoc->strmin[sid]; + if (!TAILQ_EMPTY(&strm->uno_inqueue)) { + sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), 0, new_cum_tsn); + } } SCTP_INP_READ_UNLOCK(stcb->sctp_ep); } @@ -5665,7 +5658,7 @@ sctp_handle_forward_tsn(struct sctp_tcb *stcb, if (m && fwd_sz) { /* New method. 
*/ unsigned int num_str; - uint32_t mid, cur_mid; + uint32_t mid; uint16_t sid; uint16_t ordered, flags; struct sctp_strseq *stseq, strseqbuf; @@ -5729,8 +5722,24 @@ sctp_handle_forward_tsn(struct sctp_tcb *stcb, asoc->fragmented_delivery_inprogress = 0; } strm = &asoc->strmin[sid]; - for (cur_mid = strm->last_mid_delivered; SCTP_MID_GE(asoc->idata_supported, mid, cur_mid); cur_mid++) { - sctp_flush_reassm_for_str_seq(stcb, asoc, sid, cur_mid, ordered, new_cum_tsn); + if (ordered) { + TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, ncontrol) { + if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) { + sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn); + } + } + } else { + if (asoc->idata_supported) { + TAILQ_FOREACH_SAFE(control, &strm->uno_inqueue, next_instrm, ncontrol) { + if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) { + sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn); + } + } + } else { + if (!TAILQ_EMPTY(&strm->uno_inqueue)) { + sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), ordered, new_cum_tsn); + } + } } TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) { if ((control->sinfo_stream == sid) && diff --git a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_indata.h b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_indata.h index 96c8bff4e..651493b91 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_indata.h +++ b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_indata.h @@ -32,9 +32,9 @@ * THE POSSIBILITY OF SUCH DAMAGE. */ -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) #include -__FBSDID("$FreeBSD: head/sys/netinet/sctp_indata.h 351655 2019-09-01 10:39:16Z tuexen $"); +__FBSDID("$FreeBSD: head/sys/netinet/sctp_indata.h 365071 2020-09-01 21:19:14Z mjg $"); #endif #ifndef _NETINET_SCTP_INDATA_H_ @@ -44,12 +44,11 @@ __FBSDID("$FreeBSD: head/sys/netinet/sctp_indata.h 351655 2019-09-01 10:39:16Z t struct sctp_queued_to_read * sctp_build_readq_entry(struct sctp_tcb *stcb, - struct sctp_nets *net, - uint32_t tsn, uint32_t ppid, - uint32_t context, uint16_t sid, - uint32_t mid, uint8_t flags, - struct mbuf *dm); - + struct sctp_nets *net, + uint32_t tsn, uint32_t ppid, + uint32_t context, uint16_t sid, + uint32_t mid, uint8_t flags, + struct mbuf *dm); #define sctp_build_readq_entry_mac(_ctl, in_it, context, net, tsn, ppid, sid, flags, dm, tfsn, mid) do { \ if (_ctl) { \ @@ -70,14 +69,15 @@ sctp_build_readq_entry(struct sctp_tcb *stcb, (_ctl)->data = dm; \ (_ctl)->stcb = (in_it); \ (_ctl)->port_from = (in_it)->rport; \ + if ((in_it)->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { \ + (_ctl)->do_not_ref_stcb = 1; \ + }\ } \ } while (0) - - struct mbuf * sctp_build_ctl_nchunk(struct sctp_inpcb *inp, - struct sctp_sndrcvinfo *sinfo); + struct sctp_sndrcvinfo *sinfo); void sctp_set_rwnd(struct sctp_tcb *, struct sctp_association *); @@ -86,7 +86,7 @@ sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc); void sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack, - uint32_t rwnd, int *abort_now, int ecne_seen); + uint32_t rwnd, int *abort_now, int ecne_seen); void sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup, @@ -98,7 +98,7 @@ sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup, /* draft-ietf-tsvwg-usctp */ void sctp_handle_forward_tsn(struct sctp_tcb *, - struct sctp_forward_tsn_chunk *, int *, struct mbuf *, int); + struct sctp_forward_tsn_chunk *, int *, 
struct mbuf *, int); struct sctp_tmit_chunk * sctp_try_advance_peer_ack_point(struct sctp_tcb *, struct sctp_association *); @@ -110,8 +110,8 @@ sctp_update_acked(struct sctp_tcb *, struct sctp_shutdown_chunk *, int *); int sctp_process_data(struct mbuf **, int, int *, int, - struct sctp_inpcb *, struct sctp_tcb *, - struct sctp_nets *, uint32_t *); + struct sctp_inpcb *, struct sctp_tcb *, + struct sctp_nets *, uint32_t *); void sctp_slide_mapping_arrays(struct sctp_tcb *stcb); diff --git a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_input.c b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_input.c index 5afe7813b..786ea530f 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_input.c +++ b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_input.c @@ -32,9 +32,9 @@ * THE POSSIBILITY OF SUCH DAMAGE. */ -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) #include -__FBSDID("$FreeBSD: head/sys/netinet/sctp_input.c 355135 2019-11-27 19:32:29Z tuexen $"); +__FBSDID("$FreeBSD$"); #endif #include @@ -51,23 +51,18 @@ __FBSDID("$FreeBSD: head/sys/netinet/sctp_input.c 355135 2019-11-27 19:32:29Z tu #include #include #include -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) #include #endif #if defined(INET) || defined(INET6) -#if !defined(__Userspace_os_Windows) +#if !defined(_WIN32) #include #endif #endif -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) #include #endif -#if defined(__APPLE__) -#define APPLE_FILE_NO 2 -#endif - - static void sctp_stop_all_cookie_timers(struct sctp_tcb *stcb) { @@ -99,8 +94,8 @@ static void sctp_handle_init(struct mbuf *m, int iphlen, int offset, struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh, struct sctp_init_chunk *cp, struct sctp_inpcb *inp, - struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_no_unlock, -#if defined(__FreeBSD__) + struct sctp_tcb *stcb, struct sctp_nets *net, +#if defined(__FreeBSD__) && !defined(__Userspace__) uint8_t mflowtype, uint32_t mflowid, #endif uint32_t vrf_id, uint16_t port) @@ -113,66 +108,21 @@ sctp_handle_init(struct mbuf *m, int iphlen, int offset, if (stcb == NULL) { SCTP_INP_RLOCK(inp); } - /* validate length */ - if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_chunk)) { - op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, ""); - sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err, -#if defined(__FreeBSD__) - mflowtype, mflowid, -#endif - vrf_id, port); - if (stcb) - *abort_no_unlock = 1; - goto outnow; - } - /* validate parameters */ + /* Validate parameters */ init = &cp->init; - if (init->initiate_tag == 0) { - /* protocol error... send abort */ - op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, ""); - sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err, -#if defined(__FreeBSD__) - mflowtype, mflowid, -#endif - vrf_id, port); - if (stcb) - *abort_no_unlock = 1; + if (ntohl(init->initiate_tag) == 0) { goto outnow; } - if (ntohl(init->a_rwnd) < SCTP_MIN_RWND) { - /* invalid parameter... 
send abort */ - op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, ""); - sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err, -#if defined(__FreeBSD__) - mflowtype, mflowid, -#endif - vrf_id, port); - if (stcb) - *abort_no_unlock = 1; - goto outnow; - } - if (init->num_inbound_streams == 0) { + if ((ntohl(init->a_rwnd) < SCTP_MIN_RWND) || + (ntohs(init->num_inbound_streams) == 0) || + (ntohs(init->num_outbound_streams) == 0)) { /* protocol error... send abort */ op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, ""); - sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err, -#if defined(__FreeBSD__) - mflowtype, mflowid, + sctp_send_abort(m, iphlen, src, dst, sh, init->initiate_tag, op_err, +#if defined(__FreeBSD__) && !defined(__Userspace__) + mflowtype, mflowid, inp->fibnum, #endif - vrf_id, port); - if (stcb) - *abort_no_unlock = 1; - goto outnow; - } - if (init->num_outbound_streams == 0) { - /* protocol error... send abort */ - op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, ""); - sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err, -#if defined(__FreeBSD__) - mflowtype, mflowid, -#endif - vrf_id, port); - if (stcb) - *abort_no_unlock = 1; + vrf_id, port); goto outnow; } if (sctp_validate_init_auth_params(m, offset + sizeof(*cp), @@ -180,13 +130,11 @@ sctp_handle_init(struct mbuf *m, int iphlen, int offset, /* auth parameter(s) error... send abort */ op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), "Problem with AUTH parameters"); - sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err, -#if defined(__FreeBSD__) - mflowtype, mflowid, + sctp_send_abort(m, iphlen, src, dst, sh, init->initiate_tag, op_err, +#if defined(__FreeBSD__) && !defined(__Userspace__) + mflowtype, mflowid, inp->fibnum, #endif - vrf_id, port); - if (stcb) - *abort_no_unlock = 1; + vrf_id, port); goto outnow; } /* We are only accepting if we have a listening socket.*/ @@ -210,7 +158,7 @@ sctp_handle_init(struct mbuf *m, int iphlen, int offset, op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), "No listener"); sctp_send_abort(m, iphlen, src, dst, sh, 0, op_err, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) mflowtype, mflowid, inp->fibnum, #endif vrf_id, port); @@ -226,7 +174,7 @@ sctp_handle_init(struct mbuf *m, int iphlen, int offset, SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending INIT-ACK\n"); sctp_send_initiate_ack(inp, stcb, net, m, iphlen, offset, src, dst, sh, cp, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) mflowtype, mflowid, #endif vrf_id, port); @@ -242,11 +190,7 @@ sctp_handle_init(struct mbuf *m, int iphlen, int offset, */ int -sctp_is_there_unsent_data(struct sctp_tcb *stcb, int so_locked -#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) - SCTP_UNUSED -#endif -) +sctp_is_there_unsent_data(struct sctp_tcb *stcb, int so_locked) { int unsent_data; unsigned int i; @@ -333,7 +277,6 @@ sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb) if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE|SCTP_CWND_LOGGING_ENABLE)) { sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_INITIALIZATION); } - } } SCTP_TCB_SEND_LOCK(stcb); @@ -466,7 +409,7 @@ sctp_process_init_ack(struct mbuf *m, int iphlen, int offset, struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_no_unlock, -#if defined(__FreeBSD__) +#if 
defined(__FreeBSD__) && !defined(__Userspace__) uint8_t mflowtype, uint32_t mflowid, #endif uint32_t vrf_id) @@ -487,7 +430,7 @@ sctp_process_init_ack(struct mbuf *m, int iphlen, int offset, &nat_friendly, &cookie_found); if (abort_flag) { /* Send an abort and notify peer */ - sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); + sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED); *abort_no_unlock = 1; return (-1); } @@ -514,7 +457,7 @@ sctp_process_init_ack(struct mbuf *m, int iphlen, int offset, } sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, src, dst, sh, op_err, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) mflowtype, mflowid, #endif vrf_id, net->port); @@ -524,29 +467,37 @@ sctp_process_init_ack(struct mbuf *m, int iphlen, int offset, asoc = &stcb->asoc; asoc->peer_supports_nat = (uint8_t)nat_friendly; /* process the peer's parameters in the INIT-ACK */ - retval = sctp_process_init((struct sctp_init_chunk *)cp, stcb); - if (retval < 0) { + if (sctp_process_init((struct sctp_init_chunk *)cp, stcb) < 0) { if (op_err != NULL) { sctp_m_freem(op_err); } - return (retval); + op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, ""); + SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_init() failed\n"); + sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, + src, dst, sh, op_err, +#if defined(__FreeBSD__) && !defined(__Userspace__) + mflowtype, mflowid, +#endif + vrf_id, net->port); + *abort_no_unlock = 1; + return (-1); } initack_limit = offset + ntohs(cp->ch.chunk_length); /* load all addresses */ if ((retval = sctp_load_addresses_from_init(stcb, m, - (offset + sizeof(struct sctp_init_chunk)), initack_limit, - src, dst, NULL, stcb->asoc.port))) { + offset + sizeof(struct sctp_init_chunk), + initack_limit, src, dst, NULL, stcb->asoc.port)) < 0) { if (op_err != NULL) { sctp_m_freem(op_err); } op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), "Problem with address parameters"); SCTPDBG(SCTP_DEBUG_INPUT1, - "Load addresses from INIT causes an abort %d\n", - retval); + "Load addresses from INIT causes an abort %d\n", + retval); sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, src, dst, sh, op_err, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) mflowtype, mflowid, #endif vrf_id, net->port); @@ -611,7 +562,7 @@ sctp_process_init_ack(struct mbuf *m, int iphlen, int offset, (inp->send_sb_threshold == 0))) { atomic_add_int(&stcb->asoc.refcnt, 1); SCTP_TCB_UNLOCK(stcb); - inp->send_callback(stcb->sctp_socket, sb_free_now); + inp->send_callback(stcb->sctp_socket, sb_free_now, inp->ulp_info); SCTP_TCB_LOCK(stcb); atomic_subtract_int(&stcb->asoc.refcnt, 1); } @@ -764,22 +715,20 @@ sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp, SCTP_MOBILITY_FASTHANDOFF)) && sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_PRIM_DELETED)) { - sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INPUT + SCTP_LOC_6); if (sctp_is_mobility_feature_on(stcb->sctp_ep, - SCTP_MOBILITY_FASTHANDOFF)) { + SCTP_MOBILITY_FASTHANDOFF)) { sctp_assoc_immediate_retrans(stcb, - stcb->asoc.primary_destination); + stcb->asoc.primary_destination); } if (sctp_is_mobility_feature_on(stcb->sctp_ep, - SCTP_MOBILITY_BASE)) { + SCTP_MOBILITY_BASE)) { sctp_move_chunks_from_net(stcb, - stcb->asoc.deleted_primary); + stcb->asoc.deleted_primary); } - sctp_delete_prim_timer(stcb->sctp_ep, stcb, - stcb->asoc.deleted_primary); + 
sctp_delete_prim_timer(stcb->sctp_ep, stcb); } } } @@ -796,7 +745,6 @@ sctp_handle_nat_colliding_state(struct sctp_tcb *stcb) if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { - new_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1); atomic_add_int(&stcb->asoc.refcnt, 1); SCTP_TCB_UNLOCK(stcb); SCTP_INP_INFO_WLOCK(); @@ -805,6 +753,7 @@ sctp_handle_nat_colliding_state(struct sctp_tcb *stcb) } else { return (0); } + new_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1); if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) { /* generate a new vtag and send init */ LIST_REMOVE(stcb, sctp_asocs); @@ -852,13 +801,12 @@ sctp_handle_nat_missing_state(struct sctp_tcb *stcb, return (1); } - /* Returns 1 if the stcb was aborted, 0 otherwise */ static int sctp_handle_abort(struct sctp_abort_chunk *abort, struct sctp_tcb *stcb, struct sctp_nets *net) { -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) struct socket *so; #endif uint16_t len; @@ -879,13 +827,13 @@ sctp_handle_abort(struct sctp_abort_chunk *abort, cause = (struct sctp_error_cause *)(abort + 1); error = ntohs(cause->code); if (error == SCTP_CAUSE_NAT_COLLIDING_STATE) { - SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state abort flags:%x\n", + SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state, ABORT flags:%x\n", abort->ch.chunk_flags); if (sctp_handle_nat_colliding_state(stcb)) { return (0); } } else if (error == SCTP_CAUSE_NAT_MISSING_STATE) { - SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state abort flags:%x\n", + SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state, ABORT flags:%x\n", abort->ch.chunk_flags); if (sctp_handle_nat_missing_state(stcb, net)) { return (0); @@ -895,10 +843,10 @@ sctp_handle_abort(struct sctp_abort_chunk *abort, error = 0; } /* stop any receive timers */ - sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net, + sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INPUT + SCTP_LOC_7); /* notify user of the abort and clean up... 
*/ - sctp_abort_notification(stcb, 1, error, abort, SCTP_SO_NOT_LOCKED); + sctp_abort_notification(stcb, true, false, error, abort, SCTP_SO_NOT_LOCKED); /* free the tcb */ SCTP_STAT_INCR_COUNTER32(sctps_aborted); if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || @@ -908,7 +856,7 @@ sctp_handle_abort(struct sctp_abort_chunk *abort, #ifdef SCTP_ASOCLOG_OF_TSNS sctp_print_out_track_log(stcb); #endif -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) so = SCTP_INP_SO(stcb->sctp_ep); atomic_add_int(&stcb->asoc.refcnt, 1); SCTP_TCB_UNLOCK(stcb); @@ -916,10 +864,9 @@ sctp_handle_abort(struct sctp_abort_chunk *abort, SCTP_TCB_LOCK(stcb); atomic_subtract_int(&stcb->asoc.refcnt, 1); #endif - SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED); (void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_8); -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_SOCKET_UNLOCK(so, 1); #endif SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: finished\n"); @@ -956,7 +903,6 @@ sctp_start_net_timers(struct sctp_tcb *stcb) } } - static void sctp_handle_shutdown(struct sctp_shutdown_chunk *cp, struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_flag) @@ -964,7 +910,7 @@ sctp_handle_shutdown(struct sctp_shutdown_chunk *cp, struct sctp_association *asoc; int some_on_streamwheel; int old_state; -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) struct socket *so; #endif @@ -1015,7 +961,7 @@ sctp_handle_shutdown(struct sctp_shutdown_chunk *cp, asoc->control_pdapi->pdapi_aborted = 1; asoc->control_pdapi = NULL; SCTP_INP_READ_UNLOCK(stcb->sctp_ep); -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) so = SCTP_INP_SO(stcb->sctp_ep); atomic_add_int(&stcb->asoc.refcnt, 1); SCTP_TCB_UNLOCK(stcb); @@ -1031,7 +977,7 @@ sctp_handle_shutdown(struct sctp_shutdown_chunk *cp, if (stcb->sctp_socket) { sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); } -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_SOCKET_UNLOCK(so, 1); #endif } @@ -1090,7 +1036,7 @@ sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp SCTP_UNUSED, struct sctp_nets *net) { struct sctp_association *asoc; -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) struct socket *so; so = SCTP_INP_SO(stcb->sctp_ep); @@ -1124,7 +1070,7 @@ sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp SCTP_UNUSED, asoc->control_pdapi->pdapi_aborted = 1; asoc->control_pdapi = NULL; SCTP_INP_READ_UNLOCK(stcb->sctp_ep); -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) atomic_add_int(&stcb->asoc.refcnt, 1); SCTP_TCB_UNLOCK(stcb); SCTP_SOCKET_LOCK(so, 1); @@ -1137,7 +1083,7 @@ sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp SCTP_UNUSED, } #endif sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_SOCKET_UNLOCK(so, 1); #endif } @@ -1163,7 +1109,7 @@ sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp SCTP_UNUSED, } SCTP_STAT_INCR_COUNTER32(sctps_shutdown); /* free the TCB but first save off the ep */ -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) 
atomic_add_int(&stcb->asoc.refcnt, 1); SCTP_TCB_UNLOCK(stcb); SCTP_SOCKET_LOCK(so, 1); @@ -1172,19 +1118,18 @@ sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp SCTP_UNUSED, #endif (void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_11); -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_SOCKET_UNLOCK(so, 1); #endif } static void -sctp_process_unrecog_chunk(struct sctp_tcb *stcb, uint8_t chunk_type, - struct sctp_nets *net) +sctp_process_unrecog_chunk(struct sctp_tcb *stcb, uint8_t chunk_type) { switch (chunk_type) { case SCTP_ASCONF_ACK: case SCTP_ASCONF: - sctp_asconf_cleanup(stcb, net); + sctp_asconf_cleanup(stcb); break; case SCTP_IFORWARD_CUM_TSN: case SCTP_FORWARD_CUM_TSN: @@ -1246,7 +1191,7 @@ sctp_handle_error(struct sctp_chunkhdr *ch, struct sctp_association *asoc; uint32_t remaining_length, adjust; uint16_t code, cause_code, cause_length; -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) struct socket *so; #endif @@ -1287,14 +1232,14 @@ sctp_handle_error(struct sctp_chunkhdr *ch, cause_code); break; case SCTP_CAUSE_NAT_COLLIDING_STATE: - SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state abort flags: %x\n", + SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state, ERROR flags: %x\n", ch->chunk_flags); if (sctp_handle_nat_colliding_state(stcb)) { return (0); } break; case SCTP_CAUSE_NAT_MISSING_STATE: - SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state abort flags: %x\n", + SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state, ERROR flags: %x\n", ch->chunk_flags); if (sctp_handle_nat_missing_state(stcb, net)) { return (0); @@ -1310,19 +1255,16 @@ sctp_handle_error(struct sctp_chunkhdr *ch, struct sctp_error_stale_cookie *stale_cookie; stale_cookie = (struct sctp_error_stale_cookie *)cause; - asoc->cookie_preserve_req = ntohl(stale_cookie->stale_time); - /* Double it to be more robust on RTX */ - if (asoc->cookie_preserve_req <= UINT32_MAX / 2) { - asoc->cookie_preserve_req *= 2; - } else { - asoc->cookie_preserve_req = UINT32_MAX; - } + /* stable_time is in usec, convert to msec. */ + asoc->cookie_preserve_req = ntohl(stale_cookie->stale_time) / 1000; + /* Double it to be more robust on RTX. 
*/ + asoc->cookie_preserve_req *= 2; asoc->stale_cookie_count++; if (asoc->stale_cookie_count > asoc->max_init_times) { - sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED); + sctp_abort_notification(stcb, false, true, 0, NULL, SCTP_SO_NOT_LOCKED); /* now free the asoc */ -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) so = SCTP_INP_SO(stcb->sctp_ep); atomic_add_int(&stcb->asoc.refcnt, 1); SCTP_TCB_UNLOCK(stcb); @@ -1332,7 +1274,7 @@ sctp_handle_error(struct sctp_chunkhdr *ch, #endif (void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_12); -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_SOCKET_UNLOCK(so, 1); #endif return (-1); @@ -1362,7 +1304,7 @@ sctp_handle_error(struct sctp_chunkhdr *ch, struct sctp_error_unrecognized_chunk *unrec_chunk; unrec_chunk = (struct sctp_error_unrecognized_chunk *)cause; - sctp_process_unrecog_chunk(stcb, unrec_chunk->ch.chunk_type, net); + sctp_process_unrecog_chunk(stcb, unrec_chunk->ch.chunk_type); } break; case SCTP_CAUSE_UNRECOG_PARAM: @@ -1423,7 +1365,7 @@ sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset, struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_no_unlock, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) uint8_t mflowtype, uint32_t mflowid, #endif uint32_t vrf_id) @@ -1439,73 +1381,25 @@ sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset, "sctp_handle_init_ack: TCB is null\n"); return (-1); } - if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_ack_chunk)) { - /* Invalid length */ - op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, ""); - sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, - src, dst, sh, op_err, -#if defined(__FreeBSD__) - mflowtype, mflowid, -#endif - vrf_id, net->port); - *abort_no_unlock = 1; - return (-1); - } - init_ack = &cp->init; - /* validate parameters */ - if (init_ack->initiate_tag == 0) { - /* protocol error... send an abort */ - op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, ""); - sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, - src, dst, sh, op_err, -#if defined(__FreeBSD__) - mflowtype, mflowid, -#endif - vrf_id, net->port); - *abort_no_unlock = 1; - return (-1); - } - if (ntohl(init_ack->a_rwnd) < SCTP_MIN_RWND) { - /* protocol error... send an abort */ - op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, ""); - sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, - src, dst, sh, op_err, -#if defined(__FreeBSD__) - mflowtype, mflowid, -#endif - vrf_id, net->port); - *abort_no_unlock = 1; - return (-1); - } - if (init_ack->num_inbound_streams == 0) { - /* protocol error... send an abort */ - op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, ""); - sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, - src, dst, sh, op_err, -#if defined(__FreeBSD__) - mflowtype, mflowid, -#endif - vrf_id, net->port); - *abort_no_unlock = 1; - return (-1); - } - if (init_ack->num_outbound_streams == 0) { - /* protocol error... send an abort */ - op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, ""); - sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, - src, dst, sh, op_err, -#if defined(__FreeBSD__) - mflowtype, mflowid, -#endif - vrf_id, net->port); - *abort_no_unlock = 1; - return (-1); - } - /* process according to association state... 
*/ - switch (SCTP_GET_STATE(stcb)) { - case SCTP_STATE_COOKIE_WAIT: - /* this is the expected state for this chunk */ - /* process the INIT-ACK parameters */ + /* Only process the INIT-ACK chunk in COOKIE WAIT state.*/ + if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) { + init_ack = &cp->init; + /* Validate parameters. */ + if ((ntohl(init_ack->initiate_tag) == 0) || + (ntohl(init_ack->a_rwnd) < SCTP_MIN_RWND) || + (ntohs(init_ack->num_inbound_streams) == 0) || + (ntohs(init_ack->num_outbound_streams) == 0)) { + /* One of the mandatory parameters is illegal. */ + op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, ""); + sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, + src, dst, sh, op_err, + #if defined(__FreeBSD__) && !defined(__Userspace__) + mflowtype, mflowid, + #endif + vrf_id, net->port); + *abort_no_unlock = 1; + return (-1); + } if (stcb->asoc.primary_destination->dest_state & SCTP_ADDR_UNCONFIRMED) { /* @@ -1521,56 +1415,42 @@ sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset, } if (sctp_process_init_ack(m, iphlen, offset, src, dst, sh, cp, stcb, net, abort_no_unlock, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) mflowtype, mflowid, #endif vrf_id) < 0) { /* error in parsing parameters */ return (-1); } - /* update our state */ + /* Update our state. */ SCTPDBG(SCTP_DEBUG_INPUT2, "moving to COOKIE-ECHOED state\n"); SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_ECHOED); - /* reset the RTO calc */ + /* Reset the RTO calculation. */ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { sctp_misc_ints(SCTP_THRESHOLD_CLEAR, - stcb->asoc.overall_error_count, - 0, - SCTP_FROM_SCTP_INPUT, - __LINE__); + stcb->asoc.overall_error_count, + 0, + SCTP_FROM_SCTP_INPUT, + __LINE__); } stcb->asoc.overall_error_count = 0; (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); /* - * collapse the init timer back in case of a exponential - * backoff + * Collapse the init timer back in case of a exponential + * backoff. */ sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, stcb->sctp_ep, stcb, net); /* - * the send at the end of the inbound data processing will - * cause the cookie to be sent + * The output routine at the end of the inbound data processing + * will cause the cookie to be sent. */ - break; - case SCTP_STATE_SHUTDOWN_SENT: - /* incorrect state... discard */ - break; - case SCTP_STATE_COOKIE_ECHOED: - /* incorrect state... discard */ - break; - case SCTP_STATE_OPEN: - /* incorrect state... discard */ - break; - case SCTP_STATE_EMPTY: - case SCTP_STATE_INUSE: - default: - /* incorrect state... 
discard */ + SCTPDBG(SCTP_DEBUG_INPUT1, "Leaving handle-init-ack end\n"); + return (0); + } else { return (-1); - break; } - SCTPDBG(SCTP_DEBUG_INPUT1, "Leaving handle-init-ack end\n"); - return (0); } static struct sctp_tcb * @@ -1580,12 +1460,11 @@ sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset, struct sctp_inpcb *inp, struct sctp_nets **netp, struct sockaddr *init_src, int *notification, int auth_skipped, uint32_t auth_offset, uint32_t auth_len, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) uint8_t mflowtype, uint32_t mflowid, #endif uint32_t vrf_id, uint16_t port); - /* * handle a state cookie for an existing association m: input packet mbuf * chain-- assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a @@ -1599,7 +1478,7 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset, struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets **netp, struct sockaddr *init_src, int *notification, int auth_skipped, uint32_t auth_offset, uint32_t auth_len, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) uint8_t mflowtype, uint32_t mflowid, #endif uint32_t vrf_id, uint16_t port) @@ -1607,6 +1486,11 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset, struct sctp_association *asoc; struct sctp_init_chunk *init_cp, init_buf; struct sctp_init_ack_chunk *initack_cp, initack_buf; + struct sctp_asconf_addr *aparam, *naparam; + struct sctp_asconf_ack *aack, *naack; + struct sctp_tmit_chunk *chk, *nchk; + struct sctp_stream_reset_list *strrst, *nstrrst; + struct sctp_queued_to_read *sq, *nsq; struct sctp_nets *net; struct mbuf *op_err; struct timeval old; @@ -1633,12 +1517,13 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset, sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination); op_err = sctp_generate_cause(SCTP_CAUSE_COOKIE_IN_SHUTDOWN, ""); sctp_send_operr_to(src, dst, sh, cookie->peers_vtag, op_err, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) mflowtype, mflowid, inp->fibnum, #endif vrf_id, net->port); if (how_indx < sizeof(asoc->cookie_how)) asoc->cookie_how[how_indx] = 2; + SCTP_TCB_UNLOCK(stcb); return (NULL); } /* @@ -1653,9 +1538,11 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset, (uint8_t *) & init_buf); if (init_cp == NULL) { /* could not pull a INIT chunk in cookie */ + SCTP_TCB_UNLOCK(stcb); return (NULL); } if (init_cp->ch.chunk_type != SCTP_INITIATION) { + SCTP_TCB_UNLOCK(stcb); return (NULL); } /* @@ -1668,9 +1555,11 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset, (uint8_t *) & initack_buf); if (initack_cp == NULL) { /* could not pull INIT-ACK chunk in cookie */ + SCTP_TCB_UNLOCK(stcb); return (NULL); } if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) { + SCTP_TCB_UNLOCK(stcb); return (NULL); } if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) && @@ -1688,7 +1577,7 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset, * ----INIT-ACK(tag=t)--> * ----INIT(tag=t)------> *1 * <---INIT-ACK(tag=a)--- - * <----CE(tag=t)------------- *2 + * <----CE(tag=t)------------- *2 * * At point *1 we should be generating a different * tag t'. 
Which means we would throw away the CE and send @@ -1696,8 +1585,8 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset, */ if (how_indx < sizeof(asoc->cookie_how)) asoc->cookie_how[how_indx] = 17; + SCTP_TCB_UNLOCK(stcb); return (NULL); - } switch (SCTP_GET_STATE(stcb)) { case SCTP_STATE_COOKIE_WAIT: @@ -1709,10 +1598,17 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset, * have the right seq no's. */ /* First we must process the INIT !! */ - retval = sctp_process_init(init_cp, stcb); - if (retval < 0) { + if (sctp_process_init(init_cp, stcb) < 0) { if (how_indx < sizeof(asoc->cookie_how)) asoc->cookie_how[how_indx] = 3; + op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, ""); + SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_init() failed\n"); + sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, + src, dst, sh, op_err, +#if defined(__FreeBSD__) && !defined(__Userspace__) + mflowtype, mflowid, +#endif + vrf_id, net->port); return (NULL); } /* we have already processed the INIT so no problem */ @@ -1731,14 +1627,14 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset, SCTP_SET_STATE(stcb, SCTP_STATE_OPEN); if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, - stcb->sctp_ep, stcb, asoc->primary_destination); + stcb->sctp_ep, stcb, NULL); } SCTP_STAT_INCR_GAUGE32(sctps_currestab); sctp_stop_all_cookie_timers(stcb); if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && (!SCTP_IS_LISTENING(inp))) { -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) struct socket *so; #endif /* @@ -1749,7 +1645,7 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset, */ stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED; -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) so = SCTP_INP_SO(stcb->sctp_ep); atomic_add_int(&stcb->asoc.refcnt, 1); SCTP_TCB_UNLOCK(stcb); @@ -1757,12 +1653,13 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset, SCTP_TCB_LOCK(stcb); atomic_add_int(&stcb->asoc.refcnt, -1); if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { + SCTP_TCB_UNLOCK(stcb); SCTP_SOCKET_UNLOCK(so, 1); return (NULL); } #endif soisconnected(stcb->sctp_socket); -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_SOCKET_UNLOCK(so, 1); #endif } @@ -1781,7 +1678,7 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset, if (stcb->asoc.sctp_autoclose_ticks && (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))) { sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, - inp, stcb, NULL); + inp, stcb, NULL); } break; default: @@ -1792,16 +1689,22 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset, break; } /* end switch */ sctp_stop_all_cookie_timers(stcb); - /* - * We ignore the return code here.. not sure if we should - * somehow abort.. but we do have an existing asoc. This - * really should not fail. 
- */ - if (sctp_load_addresses_from_init(stcb, m, - init_offset + sizeof(struct sctp_init_chunk), - initack_offset, src, dst, init_src, stcb->asoc.port)) { + if ((retval = sctp_load_addresses_from_init(stcb, m, + init_offset + sizeof(struct sctp_init_chunk), + initack_offset, src, dst, init_src, stcb->asoc.port)) < 0) { if (how_indx < sizeof(asoc->cookie_how)) asoc->cookie_how[how_indx] = 4; + op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), + "Problem with address parameters"); + SCTPDBG(SCTP_DEBUG_INPUT1, + "Load addresses from INIT causes an abort %d\n", + retval); + sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, + src, dst, sh, op_err, +#if defined(__FreeBSD__) && !defined(__Userspace__) + mflowtype, mflowid, +#endif + vrf_id, net->port); return (NULL); } /* respond with a COOKIE-ACK */ @@ -1821,6 +1724,7 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset, */ if (how_indx < sizeof(asoc->cookie_how)) asoc->cookie_how[how_indx] = 6; + SCTP_TCB_UNLOCK(stcb); return (NULL); } /* If nat support, and the below and stcb is established, @@ -1842,10 +1746,11 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset, */ op_err = sctp_generate_cause(SCTP_CAUSE_NAT_COLLIDING_STATE, ""); sctp_send_abort(m, iphlen, src, dst, sh, 0, op_err, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) mflowtype, mflowid, inp->fibnum, #endif vrf_id, port); + SCTP_TCB_UNLOCK(stcb); return (NULL); } if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) && @@ -1875,6 +1780,7 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset, if (how_indx < sizeof(asoc->cookie_how)) asoc->cookie_how[how_indx] = 7; + SCTP_TCB_UNLOCK(stcb); return (NULL); } if (how_indx < sizeof(asoc->cookie_how)) @@ -1890,10 +1796,12 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset, if (stcb->asoc.sctp_autoclose_ticks && sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) { sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, - NULL); + NULL); } asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd); - asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams); + if (asoc->pre_open_streams < asoc->streamoutcnt) { + asoc->pre_open_streams = asoc->streamoutcnt; + } if (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) { /* Ok the peer probably discarded our @@ -1903,8 +1811,8 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset, * kick us so it COULD still take a timeout * to move these.. but it can't hurt to mark them. 
*/ - struct sctp_tmit_chunk *chk; - TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { + + TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { if (chk->sent < SCTP_DATAGRAM_RESEND) { chk->sent = SCTP_DATAGRAM_RESEND; sctp_flight_size_decrease(chk); @@ -1913,20 +1821,37 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset, spec_flag++; } } - } /* process the INIT info (peer's info) */ - retval = sctp_process_init(init_cp, stcb); - if (retval < 0) { + if (sctp_process_init(init_cp, stcb) < 0) { if (how_indx < sizeof(asoc->cookie_how)) asoc->cookie_how[how_indx] = 9; + op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, ""); + SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_init() failed\n"); + sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, + src, dst, sh, op_err, +#if defined(__FreeBSD__) && !defined(__Userspace__) + mflowtype, mflowid, +#endif + vrf_id, net->port); return (NULL); } - if (sctp_load_addresses_from_init(stcb, m, - init_offset + sizeof(struct sctp_init_chunk), - initack_offset, src, dst, init_src, stcb->asoc.port)) { + if ((retval = sctp_load_addresses_from_init(stcb, m, + init_offset + sizeof(struct sctp_init_chunk), + initack_offset, src, dst, init_src, stcb->asoc.port)) < 0) { if (how_indx < sizeof(asoc->cookie_how)) asoc->cookie_how[how_indx] = 10; + op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), + "Problem with address parameters"); + SCTPDBG(SCTP_DEBUG_INPUT1, + "Load addresses from INIT causes an abort %d\n", + retval); + sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, + src, dst, sh, op_err, +#if defined(__FreeBSD__) && !defined(__Userspace__) + mflowtype, mflowid, +#endif + vrf_id, net->port); return (NULL); } if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || @@ -1936,12 +1861,11 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset, if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && (!SCTP_IS_LISTENING(inp))) { -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) struct socket *so; #endif - stcb->sctp_ep->sctp_flags |= - SCTP_PCB_FLAGS_CONNECTED; -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) + stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED; +#if defined(__APPLE__) && !defined(__Userspace__) so = SCTP_INP_SO(stcb->sctp_ep); atomic_add_int(&stcb->asoc.refcnt, 1); SCTP_TCB_UNLOCK(stcb); @@ -1949,12 +1873,13 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset, SCTP_TCB_LOCK(stcb); atomic_add_int(&stcb->asoc.refcnt, -1); if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { + SCTP_TCB_UNLOCK(stcb); SCTP_SOCKET_UNLOCK(so, 1); return (NULL); } #endif soisconnected(stcb->sctp_socket); -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_SOCKET_UNLOCK(so, 1); #endif } @@ -1971,7 +1896,7 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset, SCTP_SET_STATE(stcb, SCTP_STATE_OPEN); if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, - stcb->sctp_ep, stcb, asoc->primary_destination); + stcb->sctp_ep, stcb, NULL); } sctp_stop_all_cookie_timers(stcb); sctp_toss_old_cookies(stcb, asoc); @@ -1996,24 +1921,30 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset, cookie->tie_tag_peer_vtag == asoc->peer_vtag_nonce && cookie->tie_tag_peer_vtag != 0) { struct sctpasochead *head; -#if defined(__APPLE__) || 
defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) struct socket *so; #endif if (asoc->peer_supports_nat) { + struct sctp_tcb *local_stcb; + /* This is a gross gross hack. * Just call the cookie_new code since we * are allowing a duplicate association. * I hope this works... */ - return (sctp_process_cookie_new(m, iphlen, offset, src, dst, - sh, cookie, cookie_len, - inp, netp, init_src,notification, - auth_skipped, auth_offset, auth_len, -#if defined(__FreeBSD__) - mflowtype, mflowid, + local_stcb = sctp_process_cookie_new(m, iphlen, offset, src, dst, + sh, cookie, cookie_len, + inp, netp, init_src,notification, + auth_skipped, auth_offset, auth_len, +#if defined(__FreeBSD__) && !defined(__Userspace__) + mflowtype, mflowid, #endif - vrf_id, port)); + vrf_id, port); + if (local_stcb == NULL) { + SCTP_TCB_UNLOCK(stcb); + } + return (local_stcb); } /* * case A in Section 5.2.4 Table 2: XXMM (peer restarted) @@ -2021,11 +1952,7 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset, /* temp code */ if (how_indx < sizeof(asoc->cookie_how)) asoc->cookie_how[how_indx] = 12; - sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, - SCTP_FROM_SCTP_INPUT + SCTP_LOC_16); - sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, - SCTP_FROM_SCTP_INPUT + SCTP_LOC_17); - + sctp_stop_association_timers(stcb, false); /* notify upper layer */ *notification = SCTP_NOTIFY_ASSOC_RESTART; atomic_add_int(&stcb->asoc.refcnt, 1); @@ -2042,23 +1969,26 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset, if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { SCTP_SET_STATE(stcb, SCTP_STATE_OPEN); sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, - stcb->sctp_ep, stcb, asoc->primary_destination); + stcb->sctp_ep, stcb, NULL); } else if (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) { /* move to OPEN state, if not in SHUTDOWN_SENT */ SCTP_SET_STATE(stcb, SCTP_STATE_OPEN); } - asoc->pre_open_streams = - ntohs(initack_cp->init.num_outbound_streams); + if (asoc->pre_open_streams < asoc->streamoutcnt) { + asoc->pre_open_streams = asoc->streamoutcnt; + } asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn); asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number; asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1; - asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1; - asoc->str_reset_seq_in = asoc->init_seq_number; - asoc->advanced_peer_ack_point = asoc->last_acked_seq; + asoc->send_sack = 1; + asoc->data_pkts_seen = 0; + asoc->last_data_chunk_from = NULL; + asoc->last_control_chunk_from = NULL; + asoc->last_net_cmt_send_started = NULL; if (asoc->mapping_array) { memset(asoc->mapping_array, 0, asoc->mapping_array_size); @@ -2068,7 +1998,7 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset, asoc->mapping_array_size); } SCTP_TCB_UNLOCK(stcb); -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) so = SCTP_INP_SO(stcb->sctp_ep); SCTP_SOCKET_LOCK(so, 1); #endif @@ -2079,7 +2009,7 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset, /* send up all the data */ SCTP_TCB_SEND_LOCK(stcb); - sctp_report_all_outbound(stcb, 0, 1, SCTP_SO_LOCKED); + sctp_report_all_outbound(stcb, 0, SCTP_SO_LOCKED); for (i = 0; i < stcb->asoc.streamoutcnt; i++) { stcb->asoc.strmout[i].chunks_on_queues = 0; #if defined(SCTP_DETAILED_STR_STATS) @@ -2091,11 +2021,65 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int 
offset, asoc->strmout[i].abandoned_sent[0] = 0; asoc->strmout[i].abandoned_unsent[0] = 0; #endif - stcb->asoc.strmout[i].sid = i; stcb->asoc.strmout[i].next_mid_ordered = 0; stcb->asoc.strmout[i].next_mid_unordered = 0; + stcb->asoc.strmout[i].sid = i; stcb->asoc.strmout[i].last_msg_incomplete = 0; } + TAILQ_FOREACH_SAFE(strrst, &asoc->resetHead, next_resp, nstrrst) { + TAILQ_REMOVE(&asoc->resetHead, strrst, next_resp); + SCTP_FREE(strrst, SCTP_M_STRESET); + } + TAILQ_FOREACH_SAFE(sq, &asoc->pending_reply_queue, next, nsq) { + TAILQ_REMOVE(&asoc->pending_reply_queue, sq, next); + if (sq->data) { + sctp_m_freem(sq->data); + sq->data = NULL; + } + sctp_free_remote_addr(sq->whoFrom); + sq->whoFrom = NULL; + sq->stcb = NULL; + sctp_free_a_readq(stcb, sq); + } + TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) { + TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next); + if (chk->data) { + sctp_m_freem(chk->data); + chk->data = NULL; + } + if (chk->holds_key_ref) + sctp_auth_key_release(stcb, chk->auth_keyid, SCTP_SO_LOCKED); + sctp_free_remote_addr(chk->whoTo); + SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk); + SCTP_DECR_CHK_COUNT(); + } + asoc->ctrl_queue_cnt = 0; + asoc->str_reset = NULL; + asoc->stream_reset_outstanding = 0; + TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) { + TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next); + if (chk->data) { + sctp_m_freem(chk->data); + chk->data = NULL; + } + if (chk->holds_key_ref) + sctp_auth_key_release(stcb, chk->auth_keyid, SCTP_SO_LOCKED); + sctp_free_remote_addr(chk->whoTo); + SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk); + SCTP_DECR_CHK_COUNT(); + } + TAILQ_FOREACH_SAFE(aparam, &asoc->asconf_queue, next, naparam) { + TAILQ_REMOVE(&asoc->asconf_queue, aparam, next); + SCTP_FREE(aparam,SCTP_M_ASC_ADDR); + } + TAILQ_FOREACH_SAFE(aack, &asoc->asconf_ack_sent, next, naack) { + TAILQ_REMOVE(&asoc->asconf_ack_sent, aack, next); + if (aack->data != NULL) { + sctp_m_freem(aack->data); + } + SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asconf_ack), aack); + } + /* process the INIT-ACK info (my info) */ asoc->my_vtag = ntohl(initack_cp->init.initiate_tag); asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd); @@ -2104,7 +2088,7 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset, LIST_REMOVE(stcb, sctp_asocs); /* re-insert to new vtag position */ head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, - SCTP_BASE_INFO(hashasocmark))]; + SCTP_BASE_INFO(hashasocmark))]; /* * put it in the bucket in the vtag hash of assoc's for the * system @@ -2114,17 +2098,23 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset, SCTP_TCB_SEND_UNLOCK(stcb); SCTP_INP_WUNLOCK(stcb->sctp_ep); SCTP_INP_INFO_WUNLOCK(); -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_SOCKET_UNLOCK(so, 1); #endif asoc->total_flight = 0; asoc->total_flight_count = 0; /* process the INIT info (peer's info) */ - retval = sctp_process_init(init_cp, stcb); - if (retval < 0) { + if (sctp_process_init(init_cp, stcb) < 0) { if (how_indx < sizeof(asoc->cookie_how)) asoc->cookie_how[how_indx] = 13; - + op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, ""); + SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_init() failed\n"); + sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, + src, dst, sh, op_err, +#if defined(__FreeBSD__) && !defined(__Userspace__) + mflowtype, mflowid, +#endif + vrf_id, net->port); return (NULL); } /* @@ -2133,30 +2123,41 
@@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset, */ net->hb_responded = 1; - if (sctp_load_addresses_from_init(stcb, m, - init_offset + sizeof(struct sctp_init_chunk), - initack_offset, src, dst, init_src, stcb->asoc.port)) { + if ((retval = sctp_load_addresses_from_init(stcb, m, + init_offset + sizeof(struct sctp_init_chunk), + initack_offset, src, dst, init_src, stcb->asoc.port)) < 0) { if (how_indx < sizeof(asoc->cookie_how)) asoc->cookie_how[how_indx] = 14; - + op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), + "Problem with address parameters"); + SCTPDBG(SCTP_DEBUG_INPUT1, + "Load addresses from INIT causes an abort %d\n", + retval); + sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, + src, dst, sh, op_err, +#if defined(__FreeBSD__) && !defined(__Userspace__) + mflowtype, mflowid, +#endif + vrf_id, net->port); return (NULL); } /* respond with a COOKIE-ACK */ - sctp_stop_all_cookie_timers(stcb); - sctp_toss_old_cookies(stcb, asoc); sctp_send_cookie_ack(stcb); if (how_indx < sizeof(asoc->cookie_how)) asoc->cookie_how[how_indx] = 15; - + if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE) && + (asoc->sctp_autoclose_ticks > 0)) { + sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL); + } return (stcb); } if (how_indx < sizeof(asoc->cookie_how)) asoc->cookie_how[how_indx] = 16; /* all other cases... */ + SCTP_TCB_UNLOCK(stcb); return (NULL); } - /* * handle a state cookie for a new association m: input packet mbuf chain-- * assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a "split" mbuf @@ -2171,7 +2172,7 @@ sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset, struct sctp_inpcb *inp, struct sctp_nets **netp, struct sockaddr *init_src, int *notification, int auth_skipped, uint32_t auth_offset, uint32_t auth_len, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) uint8_t mflowtype, uint32_t mflowid, #endif uint32_t vrf_id, uint16_t port) @@ -2182,10 +2183,9 @@ sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset, union sctp_sockstore store; struct sctp_association *asoc; int init_offset, initack_offset, initack_limit; - int retval; int error = 0; - uint8_t auth_chunk_buf[SCTP_PARAM_BUFFER_SIZE]; -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) + uint8_t auth_chunk_buf[SCTP_CHUNK_BUFFER_SIZE]; +#if defined(__APPLE__) && !defined(__Userspace__) struct socket *so; so = SCTP_INP_SO(inp); @@ -2246,12 +2246,13 @@ sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset, * Since we are getting a cookie, we cannot be unbound. 
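
For readers less familiar with the BSD queue macros: the peer-restart path a few hunks above drains the stream-reset, pending-reply, control-send and ASCONF queues with TAILQ_FOREACH_SAFE, the only TAILQ iteration form that tolerates removing the current element. A minimal standalone sketch of that idiom, assuming a BSD-style <sys/queue.h> that provides TAILQ_FOREACH_SAFE (as the bundled user_queue.h does); struct pending and drain_queue() are illustrative names, not library types:

    #include <stdlib.h>
    #include <sys/queue.h>

    struct pending {
        TAILQ_ENTRY(pending) entries;
        void *payload;
    };
    TAILQ_HEAD(pending_head, pending);

    /* Drain the queue, freeing every element. The _SAFE variant remembers the
     * next element before the loop body runs, so removing and freeing the
     * current element does not break the iteration. */
    static void
    drain_queue(struct pending_head *head)
    {
        struct pending *p, *tmp;

        TAILQ_FOREACH_SAFE(p, head, entries, tmp) {
            TAILQ_REMOVE(head, p, entries);
            free(p->payload);
            free(p);
        }
    }
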
*/ stcb = sctp_aloc_assoc(inp, init_src, &error, - ntohl(initack_cp->init.initiate_tag), vrf_id, + ntohl(initack_cp->init.initiate_tag), + ntohl(initack_cp->init.initial_tsn), vrf_id, ntohs(initack_cp->init.num_outbound_streams), port, -#if defined(__FreeBSD__) && __FreeBSD_version >= 500000 +#if defined(__FreeBSD__) && !defined(__Userspace__) (struct thread *)NULL, -#elif defined(__Windows__) +#elif defined(_WIN32) && !defined(__Userspace__) (PKTHREAD)NULL, #else (struct proc *)NULL, @@ -2266,16 +2267,12 @@ sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset, op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, ""); sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen, src, dst, sh, op_err, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) mflowtype, mflowid, #endif vrf_id, port); return (NULL); } - /* get the correct sctp_nets */ - if (netp) - *netp = sctp_findnet(stcb, init_src); - asoc = &stcb->asoc; /* get scope variables out of cookie */ asoc->scope.ipv4_local_scope = cookie->ipv4_scope; @@ -2301,11 +2298,11 @@ sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset, op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, ""); sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen, src, dst, sh, op_err, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) mflowtype, mflowid, #endif vrf_id, port); -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) atomic_add_int(&stcb->asoc.refcnt, 1); SCTP_TCB_UNLOCK(stcb); SCTP_SOCKET_LOCK(so, 1); @@ -2314,30 +2311,17 @@ sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset, #endif (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_18); -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_SOCKET_UNLOCK(so, 1); #endif return (NULL); } /* process the INIT-ACK info (my info) */ - asoc->my_vtag = ntohl(initack_cp->init.initiate_tag); asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd); - asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams); - asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn); - asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number; - asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1; - asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1; - asoc->str_reset_seq_in = asoc->init_seq_number; - - asoc->advanced_peer_ack_point = asoc->last_acked_seq; /* process the INIT info (peer's info) */ - if (netp) - retval = sctp_process_init(init_cp, stcb); - else - retval = 0; - if (retval < 0) { -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) + if (sctp_process_init(init_cp, stcb) < 0) { +#if defined(__APPLE__) && !defined(__Userspace__) atomic_add_int(&stcb->asoc.refcnt, 1); SCTP_TCB_UNLOCK(stcb); SCTP_SOCKET_LOCK(so, 1); @@ -2346,16 +2330,16 @@ sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset, #endif (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_19); -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_SOCKET_UNLOCK(so, 1); #endif return (NULL); } /* load all addresses */ if (sctp_load_addresses_from_init(stcb, m, - init_offset + sizeof(struct sctp_init_chunk), initack_offset, - src, dst, init_src, port)) { -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) + init_offset + sizeof(struct sctp_init_chunk), 
+ initack_offset, src, dst, init_src, port) < 0) { +#if defined(__APPLE__) && !defined(__Userspace__) atomic_add_int(&stcb->asoc.refcnt, 1); SCTP_TCB_UNLOCK(stcb); SCTP_SOCKET_LOCK(so, 1); @@ -2364,7 +2348,7 @@ sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset, #endif (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_20); -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_SOCKET_UNLOCK(so, 1); #endif return (NULL); @@ -2379,13 +2363,16 @@ sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset, if (auth_skipped) { struct sctp_auth_chunk *auth; - auth = (struct sctp_auth_chunk *) - sctp_m_getptr(m, auth_offset, auth_len, auth_chunk_buf); + if (auth_len <= SCTP_CHUNK_BUFFER_SIZE) { + auth = (struct sctp_auth_chunk *)sctp_m_getptr(m, auth_offset, auth_len, auth_chunk_buf); + } else { + auth = NULL; + } if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, auth_offset)) { /* auth HMAC failed, dump the assoc and packet */ SCTPDBG(SCTP_DEBUG_AUTH1, "COOKIE-ECHO: AUTH failed\n"); -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) atomic_add_int(&stcb->asoc.refcnt, 1); SCTP_TCB_UNLOCK(stcb); SCTP_SOCKET_LOCK(so, 1); @@ -2394,7 +2381,7 @@ sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset, #endif (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_21); -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_SOCKET_UNLOCK(so, 1); #endif return (NULL); @@ -2451,7 +2438,7 @@ sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset, break; #endif default: -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) atomic_add_int(&stcb->asoc.refcnt, 1); SCTP_TCB_UNLOCK(stcb); SCTP_SOCKET_LOCK(so, 1); @@ -2460,7 +2447,7 @@ sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset, #endif (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_22); -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_SOCKET_UNLOCK(so, 1); #endif return (NULL); @@ -2471,7 +2458,7 @@ sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset, SCTP_SET_STATE(stcb, SCTP_STATE_OPEN); if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, - stcb->sctp_ep, stcb, asoc->primary_destination); + stcb->sctp_ep, stcb, NULL); } sctp_stop_all_cookie_timers(stcb); SCTP_STAT_INCR_COUNTER32(sctps_passiveestab); @@ -2489,9 +2476,11 @@ sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset, * INIT/INIT-ACK/COOKIE arrived. But of course then it * should have went to the other code.. not here.. oh well.. * a bit of protection is worth having.. 
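
The AUTH handling just above only pulls the chunk into the stack scratch buffer when auth_len fits in SCTP_CHUNK_BUFFER_SIZE, treating an oversized length the same as a missing or failed AUTH chunk rather than truncating it. A flat-buffer sketch of that guard, with pull_chunk() and CHUNK_BUFFER_SIZE as illustrative stand-ins (the real code works on an mbuf chain via sctp_m_getptr()):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    #define CHUNK_BUFFER_SIZE 512          /* stand-in for the real scratch size */

    /* Copy one chunk out of the packet into a fixed-size parse buffer.
     * Returns a pointer to the copy, or NULL when the chunk is missing,
     * oversized, or runs past the end of the packet. */
    static const uint8_t *
    pull_chunk(const uint8_t *pkt, size_t pkt_len, size_t off, size_t chunk_len,
               uint8_t buf[CHUNK_BUFFER_SIZE])
    {
        if (chunk_len == 0 || chunk_len > CHUNK_BUFFER_SIZE)
            return NULL;                   /* would overflow the parse buffer */
        if (off > pkt_len || pkt_len - off < chunk_len)
            return NULL;                   /* chunk extends past the packet */
        memcpy(buf, pkt + off, chunk_len);
        return buf;
    }
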
+ * + * XXXMJ unlocked */ stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED; -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) atomic_add_int(&stcb->asoc.refcnt, 1); SCTP_TCB_UNLOCK(stcb); SCTP_SOCKET_LOCK(so, 1); @@ -2503,7 +2492,7 @@ sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset, } #endif soisconnected(stcb->sctp_socket); -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_SOCKET_UNLOCK(so, 1); #endif } else if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && @@ -2515,19 +2504,21 @@ sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset, */ ; } - /* since we did not send a HB make sure we don't double things */ - if ((netp) && (*netp)) - (*netp)->hb_responded = 1; - if (stcb->asoc.sctp_autoclose_ticks && sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) { sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL); } (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); - if ((netp != NULL) && (*netp != NULL)) { + *netp = sctp_findnet(stcb, init_src); + if (*netp != NULL) { struct timeval old; - /* calculate the RTT and set the encaps port */ + /* + * Since we did not send a HB, make sure we don't double + * things. + */ + (*netp)->hb_responded = 1; + /* Calculate the RTT. */ old.tv_sec = cookie->time_entered.tv_sec; old.tv_usec = cookie->time_entered.tv_usec; sctp_calculate_rto(stcb, asoc, *netp, &old, SCTP_RTT_FROM_NON_DATA); @@ -2545,7 +2536,6 @@ sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset, &store.sa, cookie->local_scope, cookie->site_scope, cookie->ipv4_scope, cookie->loopback_scope); - return (stcb); } @@ -2573,7 +2563,7 @@ sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset, struct sctp_inpcb **inp_p, struct sctp_tcb **stcb, struct sctp_nets **netp, int auth_skipped, uint32_t auth_offset, uint32_t auth_len, struct sctp_tcb **locked_tcb, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) uint8_t mflowtype, uint32_t mflowid, #endif uint32_t vrf_id, uint16_t port) @@ -2586,11 +2576,15 @@ sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset, struct mbuf *m_sig; uint8_t calc_sig[SCTP_SIGNATURE_SIZE], tmp_sig[SCTP_SIGNATURE_SIZE]; uint8_t *sig; +#if defined(__Userspace__) && defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) + uint8_t cookie_ok = 1; +#else uint8_t cookie_ok = 0; +#endif unsigned int sig_offset, cookie_offset; unsigned int cookie_len; struct timeval now; - struct timeval time_expires; + struct timeval time_entered, time_expires; int notification = 0; struct sctp_nets *netl; int had_a_existing_tcb = 0; @@ -2633,6 +2627,27 @@ sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset, */ return (NULL); } +#if defined(__Userspace__) + /* + * Recover the AF_CONN addresses within the cookie. + * This needs to be done in the buffer provided for later processing + * of the cookie and in the mbuf chain for HMAC validation. 
+ */ + if ((cookie->addr_type == SCTP_CONN_ADDRESS) && (src->sa_family == AF_CONN)) { + struct sockaddr_conn *sconnp = (struct sockaddr_conn *)src; + + memcpy(cookie->address, &sconnp->sconn_addr , sizeof(void *)); + m_copyback(m, cookie_offset + offsetof(struct sctp_state_cookie, address), + (int)sizeof(void *), (caddr_t)&sconnp->sconn_addr); + } + if ((cookie->laddr_type == SCTP_CONN_ADDRESS) && (dst->sa_family == AF_CONN)) { + struct sockaddr_conn *sconnp = (struct sockaddr_conn *)dst; + + memcpy(cookie->laddress, &sconnp->sconn_addr , sizeof(void *)); + m_copyback(m, cookie_offset + offsetof(struct sctp_state_cookie, laddress), + (int)sizeof(void *), (caddr_t)&sconnp->sconn_addr); + } +#endif /* * split off the signature into its own mbuf (since it should not be * calculated in the sctp_hmac_m() call). @@ -2721,14 +2736,35 @@ sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset, return (NULL); } - /* - * check the cookie timestamps to be sure it's not stale - */ + if (sctp_ticks_to_msecs(cookie->cookie_life) > SCTP_MAX_COOKIE_LIFE) { + SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: Invalid cookie lifetime\n"); + return (NULL); + } + time_entered.tv_sec = cookie->time_entered.tv_sec; + time_entered.tv_usec = cookie->time_entered.tv_usec; + if ((time_entered.tv_sec < 0) || + (time_entered.tv_usec < 0) || + (time_entered.tv_usec >= 1000000)) { + /* Invalid time stamp. Cookie must have been modified. */ + SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: Invalid time stamp\n"); + return (NULL); + } (void)SCTP_GETTIME_TIMEVAL(&now); - /* Expire time is in Ticks, so we convert to seconds */ - time_expires.tv_sec = cookie->time_entered.tv_sec + TICKS_TO_SEC(cookie->cookie_life); - time_expires.tv_usec = cookie->time_entered.tv_usec; -#ifndef __FreeBSD__ +#if !(defined(__FreeBSD__) && !defined(__Userspace__)) + if (timercmp(&now, &time_entered, <)) { +#else + if (timevalcmp(&now, &time_entered, <)) { +#endif + SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: cookie generated in the future!\n"); + return (NULL); + } + /* + * Check the cookie timestamps to be sure it's not stale. + * cookie_life is in ticks, so we convert to seconds. 
+ */ + time_expires.tv_sec = time_entered.tv_sec + sctp_ticks_to_secs(cookie->cookie_life); + time_expires.tv_usec = time_entered.tv_usec; +#if !(defined(__FreeBSD__) && !defined(__Userspace__)) if (timercmp(&now, &time_expires, >)) #else if (timevalcmp(&now, &time_expires, >)) @@ -2750,9 +2786,8 @@ sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset, SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_stale_cookie); cause = mtod(op_err, struct sctp_error_stale_cookie *); cause->cause.code = htons(SCTP_CAUSE_STALE_COOKIE); - cause->cause.length = htons((sizeof(struct sctp_paramhdr) + - (sizeof(uint32_t)))); -#ifndef __FreeBSD__ + cause->cause.length = htons(sizeof(struct sctp_error_stale_cookie)); +#if !(defined(__FreeBSD__) && !defined(__Userspace__)) timersub(&now, &time_expires, &diff); #else diff = now; @@ -2761,16 +2796,16 @@ sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset, if ((uint32_t)diff.tv_sec > UINT32_MAX / 1000000) { staleness = UINT32_MAX; } else { - staleness = diff.tv_sec * 1000000; + staleness = (uint32_t)diff.tv_sec * 1000000; } if (UINT32_MAX - staleness >= (uint32_t)diff.tv_usec) { - staleness += diff.tv_usec; + staleness += (uint32_t)diff.tv_usec; } else { staleness = UINT32_MAX; } cause->stale_time = htonl(staleness); sctp_send_operr_to(src, dst, sh, cookie->peers_vtag, op_err, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) mflowtype, mflowid, l_inp->fibnum, #endif vrf_id, port); @@ -2873,7 +2908,7 @@ sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset, cookie, cookie_len, *inp_p, netp, to, ¬ification, auth_skipped, auth_offset, auth_len, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) mflowtype, mflowid, #endif vrf_id, port); @@ -2882,19 +2917,22 @@ sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset, had_a_existing_tcb = 1; *stcb = sctp_process_cookie_existing(m, iphlen, offset, src, dst, sh, - cookie, cookie_len, *inp_p, *stcb, netp, to, - ¬ification, auth_skipped, auth_offset, auth_len, -#if defined(__FreeBSD__) + cookie, cookie_len, *inp_p, *stcb, netp, to, + ¬ification, auth_skipped, auth_offset, auth_len, +#if defined(__FreeBSD__) && !defined(__Userspace__) mflowtype, mflowid, #endif vrf_id, port); + if (*stcb == NULL) { + *locked_tcb = NULL; + } } if (*stcb == NULL) { /* still no TCB... must be bad cookie-echo */ return (NULL); } -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) if (*netp != NULL) { (*netp)->flowtype = mflowtype; (*netp)->flowid = mflowid; @@ -2949,36 +2987,23 @@ sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset, return (m); } oso = (*inp_p)->sctp_socket; -#if (defined(__FreeBSD__) && __FreeBSD_version < 700000) - /* - * We do this to keep the sockets side happy during - * the sonewcon ONLY. 
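
To make the timestamp arithmetic above easier to follow: the cookie is rejected when its embedded timestamp is malformed or lies in the future, and it is stale once the current time exceeds time_entered plus the cookie lifetime; the overshoot is then reported in microseconds, saturating at UINT32_MAX. A userland sketch of that logic using plain struct timeval; cookie_is_stale() and lifetime_secs are illustrative stand-ins (the real code derives the lifetime via sctp_ticks_to_secs()):

    #include <stdbool.h>
    #include <stdint.h>
    #include <sys/time.h>

    /* Returns true when the cookie must be rejected (invalid timestamp, generated
     * in the future, or expired). On expiry, *staleness_usec receives how far past
     * the deadline we are, saturated at UINT32_MAX. */
    static bool
    cookie_is_stale(const struct timeval *now, const struct timeval *time_entered,
                    time_t lifetime_secs, uint32_t *staleness_usec)
    {
        struct timeval expires, diff;

        if (time_entered->tv_sec < 0 ||
            time_entered->tv_usec < 0 || time_entered->tv_usec >= 1000000)
            return true;                        /* forged or corrupted timestamp */
        if (timercmp(now, time_entered, <))
            return true;                        /* "generated in the future" */
        expires = *time_entered;
        expires.tv_sec += lifetime_secs;
        if (!timercmp(now, &expires, >))
            return false;                       /* still fresh */
        timersub(now, &expires, &diff);         /* overshoot, never negative here */
        if ((uint64_t)diff.tv_sec > UINT32_MAX / 1000000 ||
            UINT32_MAX - (uint32_t)diff.tv_sec * 1000000 < (uint32_t)diff.tv_usec)
            *staleness_usec = UINT32_MAX;       /* saturate instead of wrapping */
        else
            *staleness_usec = (uint32_t)diff.tv_sec * 1000000 +
                              (uint32_t)diff.tv_usec;
        return true;
    }
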
- */ - NET_LOCK_GIANT(); -#endif atomic_add_int(&(*stcb)->asoc.refcnt, 1); SCTP_TCB_UNLOCK((*stcb)); -#if defined(__FreeBSD__) && __FreeBSD_version >= 801000 +#if defined(__FreeBSD__) && !defined(__Userspace__) CURVNET_SET(oso->so_vnet); #endif -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_SOCKET_LOCK(oso, 1); #endif so = sonewconn(oso, 0 -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) ,NULL -#endif -#ifdef __Panda__ - ,NULL , (*inp_p)->def_vrf_id #endif ); -#if (defined(__FreeBSD__) && __FreeBSD_version < 700000) - NET_UNLOCK_GIANT(); -#endif -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_SOCKET_UNLOCK(oso, 1); #endif -#if defined(__FreeBSD__) && __FreeBSD_version >= 801000 +#if defined(__FreeBSD__) && !defined(__Userspace__) CURVNET_RESTORE(); #endif SCTP_TCB_LOCK((*stcb)); @@ -2986,7 +3011,7 @@ sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset, if (so == NULL) { struct mbuf *op_err; -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) struct socket *pcb_so; #endif /* Too many sockets */ @@ -2994,11 +3019,11 @@ sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset, op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, ""); sctp_abort_association(*inp_p, NULL, m, iphlen, src, dst, sh, op_err, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) mflowtype, mflowid, #endif vrf_id, port); -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) pcb_so = SCTP_INP_SO(*inp_p); atomic_add_int(&(*stcb)->asoc.refcnt, 1); SCTP_TCB_UNLOCK((*stcb)); @@ -3008,7 +3033,7 @@ sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset, #endif (void)sctp_free_assoc(*inp_p, *stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_23); -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_SOCKET_UNLOCK(pcb_so, 1); #endif return (NULL); @@ -3043,7 +3068,6 @@ sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset, inp->sctp_context = (*inp_p)->sctp_context; inp->local_strreset_support = (*inp_p)->local_strreset_support; inp->fibnum = (*inp_p)->fibnum; - inp->inp_starting_point_for_iterator = NULL; #if defined(__Userspace__) inp->ulp_info = (*inp_p)->ulp_info; inp->recv_callback = (*inp_p)->recv_callback; @@ -3079,7 +3103,7 @@ sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset, atomic_add_int(&(*stcb)->asoc.refcnt, 1); SCTP_TCB_UNLOCK((*stcb)); -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb, 0); #else @@ -3088,7 +3112,6 @@ sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset, SCTP_TCB_LOCK((*stcb)); atomic_subtract_int(&(*stcb)->asoc.refcnt, 1); - /* now we must check to see if we were aborted while * the move was going on and the lock/unlock happened. 
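
The COOKIE-ECHO/sonewconn sequence above, like several earlier hunks, follows one recurring discipline whenever the TCB lock has to be released so a socket lock can be taken first: pin the association with a reference count, drop the TCB lock, acquire the locks in the required order, re-take the TCB lock, unpin, then re-check whether the association was closed in the meantime. A minimal pthread analogue of that pattern, purely illustrative (assoc_t and with_socket_locked() are not library types, and the kernel code uses its own locking primitives):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    typedef struct assoc {
        pthread_mutex_t tcb_lock;     /* per-association lock */
        pthread_mutex_t socket_lock;  /* must be acquired before tcb_lock */
        atomic_int      refcnt;       /* keeps the object alive while unlocked */
        bool            closed;       /* set by the teardown path */
    } assoc_t;

    /* Called with tcb_lock held; returns with tcb_lock held.
     * Returns false if the association was closed while we were unlocked. */
    static bool
    with_socket_locked(assoc_t *a, void (*cb)(assoc_t *))
    {
        atomic_fetch_add(&a->refcnt, 1);      /* pin: object may not be freed */
        pthread_mutex_unlock(&a->tcb_lock);   /* drop to respect the lock order */
        pthread_mutex_lock(&a->socket_lock);
        pthread_mutex_lock(&a->tcb_lock);
        atomic_fetch_sub(&a->refcnt, 1);      /* unpin */
        if (a->closed) {                      /* state may have changed meanwhile */
            pthread_mutex_unlock(&a->socket_lock);
            return false;                     /* caller still holds tcb_lock */
        }
        cb(a);                                /* e.g. a soisconnected()-style call */
        pthread_mutex_unlock(&a->socket_lock);
        return true;
    }
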
*/ @@ -3111,13 +3134,13 @@ sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset, } /* Pull it from the incomplete queue and wake the guy */ -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) atomic_add_int(&(*stcb)->asoc.refcnt, 1); SCTP_TCB_UNLOCK((*stcb)); SCTP_SOCKET_LOCK(so, 1); #endif soisconnected(so); -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_TCB_LOCK((*stcb)); atomic_subtract_int(&(*stcb)->asoc.refcnt, 1); SCTP_SOCKET_UNLOCK(so, 1); @@ -3141,6 +3164,7 @@ sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp SCTP_UNUSED, { /* cp must not be used, others call this without a c-ack :-) */ struct sctp_association *asoc; + struct sctp_tmit_chunk *chk; SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_cookie_ack: handling COOKIE-ACK\n"); @@ -3166,8 +3190,7 @@ sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp SCTP_UNUSED, sctp_start_net_timers(stcb); if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, - stcb->sctp_ep, stcb, asoc->primary_destination); - + stcb->sctp_ep, stcb, NULL); } /* update RTO */ SCTP_STAT_INCR_COUNTER32(sctps_activeestab); @@ -3180,12 +3203,12 @@ sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp SCTP_UNUSED, sctp_ulp_notify(SCTP_NOTIFY_ASSOC_UP, stcb, 0, NULL, SCTP_SO_NOT_LOCKED); if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) struct socket *so; #endif stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED; -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) so = SCTP_INP_SO(stcb->sctp_ep); atomic_add_int(&stcb->asoc.refcnt, 1); SCTP_TCB_UNLOCK(stcb); @@ -3196,7 +3219,7 @@ sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp SCTP_UNUSED, if ((stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) == 0) { soisconnected(stcb->sctp_socket); } -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_SOCKET_UNLOCK(so, 1); #endif } @@ -3216,7 +3239,6 @@ sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp SCTP_UNUSED, sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); - if (stcb->asoc.sctp_autoclose_ticks && sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_AUTOCLOSE)) { sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, @@ -3243,11 +3265,13 @@ sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp SCTP_UNUSED, closed_socket: /* Toss the cookie if I can */ sctp_toss_old_cookies(stcb, asoc); - if (!TAILQ_EMPTY(&asoc->sent_queue)) { - /* Restart the timer if we have pending data */ - struct sctp_tmit_chunk *chk; - - chk = TAILQ_FIRST(&asoc->sent_queue); + /* Restart the timer if we have pending data */ + TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { + if (chk->whoTo != NULL) { + break; + } + } + if (chk != NULL) { sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo); } } @@ -3265,10 +3289,6 @@ sctp_handle_ecn_echo(struct sctp_ecne_chunk *cp, unsigned int pkt_cnt; len = ntohs(cp->ch.chunk_length); - if ((len != sizeof(struct sctp_ecne_chunk)) && - (len != sizeof(struct old_sctp_ecne_chunk))) { - return; - } if (len == sizeof(struct old_sctp_ecne_chunk)) { /* Its the old format */ memcpy(&bkup, cp, sizeof(struct old_sctp_ecne_chunk)); @@ -3415,7 +3435,7 @@ static void 
sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp SCTP_UNUSED, struct sctp_tcb *stcb, struct sctp_nets *net) { -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) struct socket *so; #endif @@ -3450,7 +3470,7 @@ sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp SCTP_UNUSE /* free the TCB */ SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_shutdown_complete: calls free-asoc\n"); -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) so = SCTP_INP_SO(stcb->sctp_ep); atomic_add_int(&stcb->asoc.refcnt, 1); SCTP_TCB_UNLOCK(stcb); @@ -3460,7 +3480,7 @@ sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp SCTP_UNUSE #endif (void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_25); -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_SOCKET_UNLOCK(so, 1); #endif return; @@ -3472,7 +3492,8 @@ process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc, { switch (desc->chunk_type) { case SCTP_DATA: - /* find the tsn to resend (possibly */ + case SCTP_IDATA: + /* find the tsn to resend (possibly) */ { uint32_t tsn; struct sctp_tmit_chunk *tp1; @@ -3506,8 +3527,6 @@ process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc, SCTP_STAT_INCR(sctps_pdrptsnnf); } if ((tp1) && (tp1->sent < SCTP_DATAGRAM_ACKED)) { - uint8_t *ddp; - if (((flg & SCTP_BADCRC) == 0) && ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) { return (0); @@ -3522,20 +3541,18 @@ process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc, SCTP_STAT_INCR(sctps_pdrpdizrw); return (0); } - ddp = (uint8_t *) (mtod(tp1->data, caddr_t) + - sizeof(struct sctp_data_chunk)); - { - unsigned int iii; - - for (iii = 0; iii < sizeof(desc->data_bytes); - iii++) { - if (ddp[iii] != desc->data_bytes[iii]) { - SCTP_STAT_INCR(sctps_pdrpbadd); - return (-1); - } - } + if ((uint32_t)SCTP_BUF_LEN(tp1->data) < + SCTP_DATA_CHUNK_OVERHEAD(stcb) + SCTP_NUM_DB_TO_VERIFY) { + /* Payload not matching. */ + SCTP_STAT_INCR(sctps_pdrpbadd); + return (-1); + } + if (memcmp(mtod(tp1->data, caddr_t) + SCTP_DATA_CHUNK_OVERHEAD(stcb), + desc->data_bytes, SCTP_NUM_DB_TO_VERIFY) != 0) { + /* Payload not matching. 
*/ + SCTP_STAT_INCR(sctps_pdrpbadd); + return (-1); } - if (tp1->do_rtt) { /* * this guy had a RTO calculation @@ -3779,7 +3796,6 @@ sctp_reset_clear_pending(struct sctp_tcb *stcb, uint32_t number_entries, uint16_ } } - struct sctp_stream_reset_request * sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq, struct sctp_tmit_chunk **bchk) { @@ -3790,19 +3806,16 @@ sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq, struct sctp_tmit_chu int len, clen; asoc = &stcb->asoc; - if (TAILQ_EMPTY(&stcb->asoc.control_send_queue)) { + chk = asoc->str_reset; + if (TAILQ_EMPTY(&asoc->control_send_queue) || + (chk == NULL)) { asoc->stream_reset_outstanding = 0; return (NULL); } - if (stcb->asoc.str_reset == NULL) { - asoc->stream_reset_outstanding = 0; - return (NULL); - } - chk = stcb->asoc.str_reset; if (chk->data == NULL) { return (NULL); } - if (bchk) { + if (bchk != NULL) { /* he wants a copy of the chk pointer */ *bchk = chk; } @@ -3838,7 +3851,7 @@ sctp_clean_up_stream_reset(struct sctp_tcb *stcb) } asoc->str_reset = NULL; sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, - chk->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_28); + NULL, SCTP_FROM_SCTP_INPUT + SCTP_LOC_28); TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next); asoc->ctrl_queue_cnt--; if (chk->data) { @@ -3848,7 +3861,6 @@ sctp_clean_up_stream_reset(struct sctp_tcb *stcb) sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); } - static int sctp_handle_stream_reset_response(struct sctp_tcb *stcb, uint32_t seq, uint32_t action, @@ -3925,7 +3937,7 @@ sctp_handle_stream_reset_response(struct sctp_tcb *stcb, if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) { /* Put the new streams into effect */ int i; - for ( i = asoc->streamoutcnt; i< (asoc->streamoutcnt + num_stream); i++) { + for (i = asoc->streamoutcnt; i < (asoc->streamoutcnt + num_stream); i++) { asoc->strmout[i].state = SCTP_STREAM_OPEN; } asoc->streamoutcnt += num_stream; @@ -4309,7 +4321,6 @@ sctp_handle_str_reset_add_strm(struct sctp_tcb *stcb, struct sctp_tmit_chunk *ch sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]); } else { sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO); - } } @@ -4370,11 +4381,9 @@ sctp_handle_str_reset_add_out_strm(struct sctp_tcb *stcb, struct sctp_tmit_chunk } } -#if !defined(__Panda__) #ifdef __GNUC__ __attribute__ ((noinline)) #endif -#endif static int sctp_handle_stream_reset(struct sctp_tcb *stcb, struct mbuf *m, int offset, struct sctp_chunkhdr *ch_req) @@ -4556,104 +4565,116 @@ static void sctp_handle_packet_dropped(struct sctp_pktdrop_chunk *cp, struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t limit) { - uint32_t bottle_bw, on_queue; - uint16_t trunc_len; - unsigned int chlen; - unsigned int at; struct sctp_chunk_desc desc; - struct sctp_chunkhdr *ch; + struct sctp_chunkhdr *chk_hdr; + struct sctp_data_chunk *data_chunk; + struct sctp_idata_chunk *idata_chunk; + uint32_t bottle_bw, on_queue; + uint32_t offset, chk_len; + uint16_t pktdrp_len; + uint8_t pktdrp_flags; - chlen = ntohs(cp->ch.chunk_length); - chlen -= sizeof(struct sctp_pktdrop_chunk); - /* XXX possible chlen underflow */ - if (chlen == 0) { - ch = NULL; - if (cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) + KASSERT(sizeof(struct sctp_pktdrop_chunk) <= limit, + ("PKTDROP chunk too small")); + pktdrp_flags = cp->ch.chunk_flags; + pktdrp_len = ntohs(cp->ch.chunk_length); + KASSERT(limit <= pktdrp_len, ("Inconsistent limit")); + if (pktdrp_flags & SCTP_PACKET_TRUNCATED) { + if (ntohs(cp->trunc_len) <= pktdrp_len - 
sizeof(struct sctp_pktdrop_chunk)) { + /* The peer plays games with us. */ + return; + } + } + limit -= sizeof(struct sctp_pktdrop_chunk); + offset = 0; + if (offset == limit) { + if (pktdrp_flags & SCTP_FROM_MIDDLE_BOX) { SCTP_STAT_INCR(sctps_pdrpbwrpt); + } + } else if (offset + sizeof(struct sctphdr) > limit) { + /* Only a partial SCTP common header. */ + SCTP_STAT_INCR(sctps_pdrpcrupt); + offset = limit; } else { - ch = (struct sctp_chunkhdr *)(cp->data + sizeof(struct sctphdr)); - chlen -= sizeof(struct sctphdr); - /* XXX possible chlen underflow */ - memset(&desc, 0, sizeof(desc)); + /* XXX: Check embedded SCTP common header. */ + offset += sizeof(struct sctphdr); } - trunc_len = (uint16_t) ntohs(cp->trunc_len); - if (trunc_len > limit) { - trunc_len = limit; - } - - /* now the chunks themselves */ - while ((ch != NULL) && (chlen >= sizeof(struct sctp_chunkhdr))) { - desc.chunk_type = ch->chunk_type; - /* get amount we need to move */ - at = ntohs(ch->chunk_length); - if (at < sizeof(struct sctp_chunkhdr)) { - /* corrupt chunk, maybe at the end? */ + /* Now parse through the chunks themselves. */ + while (offset < limit) { + if (offset + sizeof(struct sctp_chunkhdr) > limit) { SCTP_STAT_INCR(sctps_pdrpcrupt); break; } - if (trunc_len == 0) { - /* we are supposed to have all of it */ - if (at > chlen) { - /* corrupt skip it */ - SCTP_STAT_INCR(sctps_pdrpcrupt); - break; - } - } else { - /* is there enough of it left ? */ - if (desc.chunk_type == SCTP_DATA) { - if (chlen < (sizeof(struct sctp_data_chunk) + - sizeof(desc.data_bytes))) { - break; - } - } else { - if (chlen < sizeof(struct sctp_chunkhdr)) { - break; - } - } + chk_hdr = (struct sctp_chunkhdr *)(cp->data + offset); + desc.chunk_type = chk_hdr->chunk_type; + /* get amount we need to move */ + chk_len = (uint32_t)ntohs(chk_hdr->chunk_length); + if (chk_len < sizeof(struct sctp_chunkhdr)) { + /* Someone is lying... */ + break; } if (desc.chunk_type == SCTP_DATA) { - /* can we get out the tsn? */ - if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)) - SCTP_STAT_INCR(sctps_pdrpmbda); - - if (chlen >= (sizeof(struct sctp_data_chunk) + sizeof(uint32_t))) { - /* yep */ - struct sctp_data_chunk *dcp; - uint8_t *ddp; - unsigned int iii; - - dcp = (struct sctp_data_chunk *)ch; - ddp = (uint8_t *) (dcp + 1); - for (iii = 0; iii < sizeof(desc.data_bytes); iii++) { - desc.data_bytes[iii] = ddp[iii]; - } - desc.tsn_ifany = dcp->dp.tsn; - } else { - /* nope we are done. */ - SCTP_STAT_INCR(sctps_pdrpnedat); + if (stcb->asoc.idata_supported) { + /* Some is playing games with us. */ break; } + if (chk_len <= sizeof(struct sctp_data_chunk)) { + /* Some is playing games with us. */ + break; + } + if (chk_len < sizeof(struct sctp_data_chunk) + SCTP_NUM_DB_TO_VERIFY) { + /* Not enough data bytes available in the chunk. */ + SCTP_STAT_INCR(sctps_pdrpnedat); + goto next_chunk; + } + if (offset + sizeof(struct sctp_data_chunk) + SCTP_NUM_DB_TO_VERIFY > limit) { + /* Not enough data in buffer. */ + break; + } + data_chunk = (struct sctp_data_chunk *)(cp->data + offset); + memcpy(desc.data_bytes, data_chunk + 1, SCTP_NUM_DB_TO_VERIFY); + desc.tsn_ifany = data_chunk->dp.tsn; + if (pktdrp_flags & SCTP_FROM_MIDDLE_BOX) { + SCTP_STAT_INCR(sctps_pdrpmbda); + } + } else if (desc.chunk_type == SCTP_IDATA) { + if (!stcb->asoc.idata_supported) { + /* Some is playing games with us. */ + break; + } + if (chk_len <= sizeof(struct sctp_idata_chunk)) { + /* Some is playing games with us. 
*/ + break; + } + if (chk_len < sizeof(struct sctp_idata_chunk) + SCTP_NUM_DB_TO_VERIFY) { + /* Not enough data bytes available in the chunk. */ + SCTP_STAT_INCR(sctps_pdrpnedat); + goto next_chunk; + } + if (offset + sizeof(struct sctp_idata_chunk) + SCTP_NUM_DB_TO_VERIFY > limit) { + /* Not enough data in buffer. */ + break; + } + idata_chunk = (struct sctp_idata_chunk *)(cp->data + offset); + memcpy(desc.data_bytes, idata_chunk + 1, SCTP_NUM_DB_TO_VERIFY); + desc.tsn_ifany = idata_chunk->dp.tsn; + if (pktdrp_flags & SCTP_FROM_MIDDLE_BOX) { + SCTP_STAT_INCR(sctps_pdrpmbda); + } } else { - if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)) + if (pktdrp_flags & SCTP_FROM_MIDDLE_BOX) { SCTP_STAT_INCR(sctps_pdrpmbct); + } } - - if (process_chunk_drop(stcb, &desc, net, cp->ch.chunk_flags)) { + if (process_chunk_drop(stcb, &desc, net, pktdrp_flags)) { SCTP_STAT_INCR(sctps_pdrppdbrk); break; } - if (SCTP_SIZE32(at) > chlen) { - break; - } - chlen -= SCTP_SIZE32(at); - if (chlen < sizeof(struct sctp_chunkhdr)) { - /* done, none left */ - break; - } - ch = (struct sctp_chunkhdr *)((caddr_t)ch + SCTP_SIZE32(at)); +next_chunk: + offset += SCTP_SIZE32(chk_len); } /* Now update any rwnd --- possibly */ - if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) == 0) { + if ((pktdrp_flags & SCTP_FROM_MIDDLE_BOX) == 0) { /* From a peer, we get a rwnd report */ uint32_t a_rwnd; @@ -4689,7 +4710,7 @@ sctp_handle_packet_dropped(struct sctp_pktdrop_chunk *cp, } /* now middle boxes in sat networks get a cwnd bump */ - if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) && + if ((pktdrp_flags & SCTP_FROM_MIDDLE_BOX) && (stcb->asoc.sat_t3_loss_recovery == 0) && (stcb->asoc.sat_network)) { /* @@ -4711,17 +4732,15 @@ sctp_handle_packet_dropped(struct sctp_pktdrop_chunk *cp, * cookie-echo processing - return NULL to discard the packet (ie. no asoc, * bad packet,...) otherwise return the tcb for this packet */ -#if !defined(__Panda__) #ifdef __GNUC__ __attribute__ ((noinline)) #endif -#endif static struct sctp_tcb * sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length, struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh, struct sctp_chunkhdr *ch, struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets **netp, int *fwd_tsn_seen, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum, #endif uint32_t vrf_id, uint16_t port) @@ -4735,6 +4754,7 @@ sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length, int ret; int abort_no_unlock = 0; int ecne_seen = 0; + int abort_flag; /* * How big should this be, and should it be alloc'd? Lets try the * d-mtu-ceiling for now (2k) and that should hopefully work ... 
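
The rewritten PKTDROP parser above abandons the old shrinking-chlen pointer arithmetic in favour of an offset/limit walk: confirm a whole header fits, read the length, refuse lengths that lie, and advance by the 4-byte-padded size. A self-contained sketch of that walking discipline over a flat buffer; walk_chunks(), struct chunk_hdr and PAD4 are illustrative names, and the real code additionally distinguishes DATA from I-DATA and the middle-box reporting cases:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>   /* ntohs */

    struct chunk_hdr {           /* mirrors the 4-byte SCTP chunk header */
        uint8_t  type;
        uint8_t  flags;
        uint16_t length;         /* network byte order, includes the header */
    };

    #define PAD4(x) (((x) + 3u) & ~(size_t)3)

    /* Walk the chunks embedded in buf[0..limit); stop at the first malformed one.
     * Returns the number of well-formed chunks visited. */
    static size_t
    walk_chunks(const uint8_t *buf, size_t limit,
                void (*chunk_cb)(uint8_t type, const uint8_t *chunk, size_t len))
    {
        size_t offset = 0, count = 0;

        while (offset < limit) {
            struct chunk_hdr hdr;

            if (limit - offset < sizeof(hdr))
                break;                               /* truncated header */
            memcpy(&hdr, buf + offset, sizeof(hdr)); /* avoids alignment issues */
            size_t len = ntohs(hdr.length);
            if (len < sizeof(hdr) || len > limit - offset)
                break;                               /* lying length field */
            chunk_cb(hdr.type, buf + offset, len);
            count++;
            if (PAD4(len) > limit - offset)
                break;                               /* final chunk, padding missing */
            offset += PAD4(len);                     /* chunks are 4-byte aligned */
        }
        return count;
    }
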
@@ -4745,7 +4765,7 @@ sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length, uint32_t auth_offset = 0, auth_len = 0; int auth_skipped = 0; int asconf_cnt = 0; -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) struct socket *so; #endif @@ -4848,11 +4868,13 @@ sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length, if (auth_skipped && (stcb != NULL)) { struct sctp_auth_chunk *auth; - auth = (struct sctp_auth_chunk *) - sctp_m_getptr(m, auth_offset, - auth_len, chunk_buf); - got_auth = 1; - auth_skipped = 0; + if (auth_len <= SCTP_CHUNK_BUFFER_SIZE) { + auth = (struct sctp_auth_chunk *)sctp_m_getptr(m, auth_offset, auth_len, chunk_buf); + got_auth = 1; + auth_skipped = 0; + } else { + auth = NULL; + } if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, auth_offset)) { /* auth HMAC failed so dump it */ @@ -4865,12 +4887,12 @@ sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length, } } if (stcb == NULL) { - snprintf(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__); + SCTP_SNPRINTF(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__); op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), msg); /* no association, so it's out of the blue... */ sctp_handle_ootb(m, iphlen, *offset, src, dst, sh, inp, op_err, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) mflowtype, mflowid, inp->fibnum, #endif vrf_id, port); @@ -4897,29 +4919,6 @@ sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length, } return (NULL); } - } else if (ch->chunk_type == SCTP_SHUTDOWN_ACK) { - if (vtag_in != asoc->my_vtag) { - /* - * this could be a stale SHUTDOWN-ACK or the - * peer never got the SHUTDOWN-COMPLETE and - * is still hung; we have started a new asoc - * but it won't complete until the shutdown - * is completed - */ - if (stcb != NULL) { - SCTP_TCB_UNLOCK(stcb); - } - snprintf(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__); - op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), - msg); - sctp_handle_ootb(m, iphlen, *offset, src, dst, - sh, inp, op_err, -#if defined(__FreeBSD__) - mflowtype, mflowid, fibnum, -#endif - vrf_id, port); - return (NULL); - } } else { /* for all other chunks, vtag must match */ if (vtag_in != asoc->my_vtag) { @@ -4982,10 +4981,7 @@ sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length, chunk_buf); if (ch == NULL) { *offset = length; - if (stcb != NULL) { - SCTP_TCB_UNLOCK(stcb); - } - return (NULL); + return (stcb); } num_chunks++; @@ -5019,34 +5015,36 @@ sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length, /* The INIT chunk must be the only chunk. */ if ((num_chunks > 1) || (length - *offset > (int)SCTP_SIZE32(chk_length))) { - /* RFC 4960 requires that no ABORT is sent */ + /* + * RFC 4960bis requires stopping the + * processing of the packet. + */ + *offset = length; + return (stcb); + } + /* Honor our resource limit. */ + if (chk_length > SCTP_LARGEST_INIT_ACCEPTED) { + op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, ""); + sctp_send_abort(m, iphlen, src, dst, sh, 0, op_err, +#if defined(__FreeBSD__) && !defined(__Userspace__) + mflowtype, mflowid, inp->fibnum, +#endif + vrf_id, port); *offset = length; if (stcb != NULL) { SCTP_TCB_UNLOCK(stcb); } return (NULL); } - /* Honor our resource limit. 
*/ - if (chk_length > SCTP_LARGEST_INIT_ACCEPTED) { - op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, ""); - sctp_abort_association(inp, stcb, m, iphlen, - src, dst, sh, op_err, -#if defined(__FreeBSD__) - mflowtype, mflowid, -#endif - vrf_id, port); - *offset = length; - return (NULL); - } sctp_handle_init(m, iphlen, *offset, src, dst, sh, (struct sctp_init_chunk *)ch, inp, - stcb, *netp, &abort_no_unlock, -#if defined(__FreeBSD__) + stcb, *netp, +#if defined(__FreeBSD__) && !defined(__Userspace__) mflowtype, mflowid, #endif vrf_id, port); *offset = length; - if ((!abort_no_unlock) && (stcb != NULL)) { + if (stcb != NULL) { SCTP_TCB_UNLOCK(stcb); } return (NULL); @@ -5062,7 +5060,7 @@ sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length, } else { *offset = length; if (stcb != NULL) { -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) so = SCTP_INP_SO(inp); atomic_add_int(&stcb->asoc.refcnt, 1); SCTP_TCB_UNLOCK(stcb); @@ -5072,7 +5070,7 @@ sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length, #endif (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_29); -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_SOCKET_UNLOCK(so, 1); #endif } @@ -5091,7 +5089,7 @@ sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length, (struct sctp_init_ack_chunk *)ch, stcb, *netp, &abort_no_unlock, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) mflowtype, mflowid, #endif vrf_id); @@ -5231,20 +5229,19 @@ sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length, if ((stcb != NULL) && (netp != NULL) && (*netp != NULL)) { SCTP_STAT_INCR(sctps_recvheartbeat); sctp_send_heartbeat_ack(stcb, m, *offset, - chk_length, *netp); + chk_length, *netp); } break; case SCTP_HEARTBEAT_ACK: SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT_ACK\n"); if ((stcb == NULL) || (chk_length != sizeof(struct sctp_heartbeat_chunk))) { /* Its not ours */ - *offset = length; - return (stcb); + break; } SCTP_STAT_INCR(sctps_recvheartbeatack); if ((netp != NULL) && (*netp != NULL)) { sctp_handle_heartbeat_ack((struct sctp_heartbeat_chunk *)ch, - stcb, *netp); + stcb, *netp); } break; case SCTP_ABORT_ASSOCIATION: @@ -5265,14 +5262,12 @@ sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length, SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN, stcb %p\n", (void *)stcb); if ((stcb == NULL) || (chk_length != sizeof(struct sctp_shutdown_chunk))) { - *offset = length; - return (stcb); + break; } if ((netp != NULL) && (*netp != NULL)) { - int abort_flag = 0; - + abort_flag = 0; sctp_handle_shutdown((struct sctp_shutdown_chunk *)ch, - stcb, *netp, &abort_flag); + stcb, *netp, &abort_flag); if (abort_flag) { *offset = length; return (NULL); @@ -5281,11 +5276,12 @@ sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length, break; case SCTP_SHUTDOWN_ACK: SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN_ACK, stcb %p\n", (void *)stcb); - if ((stcb != NULL) && (netp != NULL) && (*netp != NULL)) { + if ((chk_length == sizeof(struct sctp_shutdown_ack_chunk)) && + (stcb != NULL) && (netp != NULL) && (*netp != NULL)) { sctp_handle_shutdown_ack((struct sctp_shutdown_ack_chunk *)ch, stcb, *netp); + *offset = length; + return (NULL); } - *offset = length; - return (NULL); break; case SCTP_OPERATION_ERROR: SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_OP_ERR\n"); @@ -5331,7 +5327,7 @@ sctp_process_control(struct 
mbuf *m, int iphlen, int *offset, int length, if ((stcb == NULL) && (!SCTP_IS_LISTENING(inp) || (!(inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && -#if defined(__FreeBSD__) && __FreeBSD_version >= 1200034 +#if defined(__FreeBSD__) && !defined(__Userspace__) inp->sctp_socket->sol_qlen >= inp->sctp_socket->sol_qlimit))) { #else inp->sctp_socket->so_qlen >= inp->sctp_socket->so_qlimit))) { @@ -5341,7 +5337,7 @@ sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length, op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, ""); sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) mflowtype, mflowid, #endif vrf_id, port); @@ -5351,6 +5347,13 @@ sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length, } else { struct mbuf *ret_buf; struct sctp_inpcb *linp; + struct sctp_tmit_chunk *chk; + + if (inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE | + SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { + goto abend; + } + if (stcb) { linp = NULL; } else { @@ -5359,11 +5362,6 @@ sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length, if (linp != NULL) { SCTP_ASOC_CREATE_LOCK(linp); - if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || - (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { - SCTP_ASOC_CREATE_UNLOCK(linp); - goto abend; - } } if (netp != NULL) { @@ -5381,7 +5379,7 @@ sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length, auth_offset, auth_len, &locked_stcb, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) mflowtype, mflowid, #endif @@ -5413,14 +5411,13 @@ sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length, got_auth = 1; auth_skipped = 0; } - if (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) { - /* - * Restart the timer if we have - * pending data - */ - struct sctp_tmit_chunk *chk; - - chk = TAILQ_FIRST(&stcb->asoc.sent_queue); + /* Restart the timer if we have pending data */ + TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { + if (chk->whoTo != NULL) { + break; + } + } + if (chk != NULL) { sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo); } } @@ -5428,14 +5425,14 @@ sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length, case SCTP_COOKIE_ACK: SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_COOKIE_ACK, stcb %p\n", (void *)stcb); if ((stcb == NULL) || chk_length != sizeof(struct sctp_cookie_ack_chunk)) { - return (stcb); + break; } if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { /* We are not interested anymore */ if ((stcb) && (stcb->asoc.total_output_queue_size)) { ; } else if (stcb) { -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) so = SCTP_INP_SO(inp); atomic_add_int(&stcb->asoc.refcnt, 1); SCTP_TCB_UNLOCK(stcb); @@ -5445,7 +5442,7 @@ sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length, #endif (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_30); -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_SOCKET_UNLOCK(so, 1); #endif *offset = length; @@ -5458,26 +5455,30 @@ sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length, break; case SCTP_ECN_ECHO: SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN_ECHO\n"); - if ((stcb == NULL) || (chk_length != sizeof(struct sctp_ecne_chunk))) { - /* Its not ours */ - *offset = length; - return (stcb); + if (stcb == NULL) { + break; } if 
(stcb->asoc.ecn_supported == 0) { goto unknown_chunk; } + if ((chk_length != sizeof(struct sctp_ecne_chunk)) && + (chk_length != sizeof(struct old_sctp_ecne_chunk))) { + break; + } sctp_handle_ecn_echo((struct sctp_ecne_chunk *)ch, stcb); ecne_seen = 1; break; case SCTP_ECN_CWR: SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN_CWR\n"); - if ((stcb == NULL) || (chk_length != sizeof(struct sctp_cwr_chunk))) { - *offset = length; - return (stcb); + if (stcb == NULL) { + break; } if (stcb->asoc.ecn_supported == 0) { goto unknown_chunk; } + if (chk_length != sizeof(struct sctp_cwr_chunk)) { + break; + } sctp_handle_ecn_cwr((struct sctp_cwr_chunk *)ch, stcb, *netp); break; case SCTP_SHUTDOWN_COMPLETE: @@ -5488,12 +5489,13 @@ sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length, *offset = length; return (stcb); } - if ((stcb != NULL) && (netp != NULL) && (*netp != NULL)) { + if ((chk_length == sizeof(struct sctp_shutdown_complete_chunk)) && + (stcb != NULL) && (netp != NULL) && (*netp != NULL)) { sctp_handle_shutdown_complete((struct sctp_shutdown_complete_chunk *)ch, - stcb, *netp); + stcb, *netp); + *offset = length; + return (NULL); } - *offset = length; - return (NULL); break; case SCTP_ASCONF: SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF\n"); @@ -5502,93 +5504,105 @@ sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length, goto unknown_chunk; } sctp_handle_asconf(m, *offset, src, - (struct sctp_asconf_chunk *)ch, stcb, asconf_cnt == 0); + (struct sctp_asconf_chunk *)ch, stcb, asconf_cnt == 0); asconf_cnt++; } break; case SCTP_ASCONF_ACK: SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF_ACK\n"); - if (chk_length < sizeof(struct sctp_asconf_ack_chunk)) { - /* Its not ours */ - *offset = length; - return (stcb); + if (stcb == NULL) { + break; } - if ((stcb != NULL) && (netp != NULL) && (*netp != NULL)) { - if (stcb->asoc.asconf_supported == 0) { - goto unknown_chunk; - } + if (stcb->asoc.asconf_supported == 0) { + goto unknown_chunk; + } + if (chk_length < sizeof(struct sctp_asconf_ack_chunk)) { + break; + } + if ((netp != NULL) && (*netp != NULL)) { /* He's alive so give him credit */ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { sctp_misc_ints(SCTP_THRESHOLD_CLEAR, - stcb->asoc.overall_error_count, - 0, - SCTP_FROM_SCTP_INPUT, - __LINE__); + stcb->asoc.overall_error_count, + 0, + SCTP_FROM_SCTP_INPUT, + __LINE__); } stcb->asoc.overall_error_count = 0; sctp_handle_asconf_ack(m, *offset, - (struct sctp_asconf_ack_chunk *)ch, stcb, *netp, &abort_no_unlock); + (struct sctp_asconf_ack_chunk *)ch, stcb, *netp, &abort_no_unlock); if (abort_no_unlock) return (NULL); } break; case SCTP_FORWARD_CUM_TSN: case SCTP_IFORWARD_CUM_TSN: - SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_FWD_TSN\n"); - if (chk_length < sizeof(struct sctp_forward_tsn_chunk)) { - /* Its not ours */ - *offset = length; - return (stcb); + SCTPDBG(SCTP_DEBUG_INPUT3, "%s\n", + ch->chunk_type == SCTP_FORWARD_CUM_TSN ? 
"FORWARD_TSN" : "I_FORWARD_TSN"); + if (stcb == NULL) { + break; } - - if (stcb != NULL) { - int abort_flag = 0; - - if (stcb->asoc.prsctp_supported == 0) { - goto unknown_chunk; + if (stcb->asoc.prsctp_supported == 0) { + goto unknown_chunk; + } + if (chk_length < sizeof(struct sctp_forward_tsn_chunk)) { + break; + } + if (((stcb->asoc.idata_supported == 1) && (ch->chunk_type == SCTP_FORWARD_CUM_TSN)) || + ((stcb->asoc.idata_supported == 0) && (ch->chunk_type == SCTP_IFORWARD_CUM_TSN))) { + if (ch->chunk_type == SCTP_FORWARD_CUM_TSN) { + SCTP_SNPRINTF(msg, sizeof(msg), "%s", "FORWARD-TSN chunk received when I-FORWARD-TSN was negotiated"); + } else { + SCTP_SNPRINTF(msg, sizeof(msg), "%s", "I-FORWARD-TSN chunk received when FORWARD-TSN was negotiated"); } - *fwd_tsn_seen = 1; - if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { - /* We are not interested anymore */ -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) - so = SCTP_INP_SO(inp); - atomic_add_int(&stcb->asoc.refcnt, 1); - SCTP_TCB_UNLOCK(stcb); - SCTP_SOCKET_LOCK(so, 1); - SCTP_TCB_LOCK(stcb); - atomic_subtract_int(&stcb->asoc.refcnt, 1); + op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); + sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED); + *offset = length; + return (NULL); + } + *fwd_tsn_seen = 1; + if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { + /* We are not interested anymore */ +#if defined(__APPLE__) && !defined(__Userspace__) + so = SCTP_INP_SO(inp); + atomic_add_int(&stcb->asoc.refcnt, 1); + SCTP_TCB_UNLOCK(stcb); + SCTP_SOCKET_LOCK(so, 1); + SCTP_TCB_LOCK(stcb); + atomic_subtract_int(&stcb->asoc.refcnt, 1); #endif - (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, - SCTP_FROM_SCTP_INPUT + SCTP_LOC_31); -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) - SCTP_SOCKET_UNLOCK(so, 1); + (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, + SCTP_FROM_SCTP_INPUT + SCTP_LOC_31); +#if defined(__APPLE__) && !defined(__Userspace__) + SCTP_SOCKET_UNLOCK(so, 1); #endif - *offset = length; - return (NULL); - } - /* - * For sending a SACK this looks like DATA - * chunks. - */ - stcb->asoc.last_data_chunk_from = stcb->asoc.last_control_chunk_from; - sctp_handle_forward_tsn(stcb, - (struct sctp_forward_tsn_chunk *)ch, &abort_flag, m, *offset); - if (abort_flag) { - *offset = length; - return (NULL); - } + *offset = length; + return (NULL); + } + /* + * For sending a SACK this looks like DATA + * chunks. 
+ */ + stcb->asoc.last_data_chunk_from = stcb->asoc.last_control_chunk_from; + abort_flag = 0; + sctp_handle_forward_tsn(stcb, + (struct sctp_forward_tsn_chunk *)ch, &abort_flag, m, *offset); + if (abort_flag) { + *offset = length; + return (NULL); } break; case SCTP_STREAM_RESET: SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_STREAM_RESET\n"); - if (((stcb == NULL) || (ch == NULL) || (chk_length < sizeof(struct sctp_stream_reset_tsn_req)))) { - /* Its not ours */ - *offset = length; - return (stcb); + if (stcb == NULL) { + break; } if (stcb->asoc.reconfig_supported == 0) { goto unknown_chunk; } + if (chk_length < sizeof(struct sctp_stream_reset_tsn_req)) { + break; + } if (sctp_handle_stream_reset(stcb, m, *offset, ch)) { /* stop processing */ *offset = length; @@ -5597,20 +5611,19 @@ sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length, break; case SCTP_PACKET_DROPPED: SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_PACKET_DROPPED\n"); - /* re-get it all please */ - if (chk_length < sizeof(struct sctp_pktdrop_chunk)) { - /* Its not ours */ - *offset = length; - return (stcb); + if (stcb == NULL) { + break; } - - if ((ch != NULL) && (stcb != NULL) && (netp != NULL) && (*netp != NULL)) { - if (stcb->asoc.pktdrop_supported == 0) { - goto unknown_chunk; - } + if (stcb->asoc.pktdrop_supported == 0) { + goto unknown_chunk; + } + if (chk_length < sizeof(struct sctp_pktdrop_chunk)) { + break; + } + if ((netp != NULL) && (*netp != NULL)) { sctp_handle_packet_dropped((struct sctp_pktdrop_chunk *)ch, - stcb, *netp, - min(chk_length, contiguous)); + stcb, *netp, + min(chk_length, contiguous)); } break; case SCTP_AUTHENTICATION: @@ -5623,25 +5636,24 @@ sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length, auth_skipped = 1; } /* skip this chunk (temporarily) */ - goto next_chunk; + break; } if (stcb->asoc.auth_supported == 0) { goto unknown_chunk; } if ((chk_length < (sizeof(struct sctp_auth_chunk))) || (chk_length > (sizeof(struct sctp_auth_chunk) + - SCTP_AUTH_DIGEST_LEN_MAX))) { + SCTP_AUTH_DIGEST_LEN_MAX))) { /* Its not ours */ *offset = length; return (stcb); } if (got_auth == 1) { /* skip this chunk... it's already auth'd */ - goto next_chunk; + break; } got_auth = 1; - if ((ch == NULL) || sctp_handle_auth(stcb, (struct sctp_auth_chunk *)ch, - m, *offset)) { + if (sctp_handle_auth(stcb, (struct sctp_auth_chunk *)ch, m, *offset)) { /* auth HMAC failed so dump the packet */ *offset = length; return (stcb); @@ -5654,7 +5666,11 @@ sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length, default: unknown_chunk: /* it's an unknown chunk! 
*/ - if ((ch->chunk_type & 0x40) && (stcb != NULL)) { + if ((ch->chunk_type & 0x40) && + (stcb != NULL) && + (SCTP_GET_STATE(stcb) != SCTP_STATE_EMPTY) && + (SCTP_GET_STATE(stcb) != SCTP_STATE_INUSE) && + (SCTP_GET_STATE(stcb) != SCTP_STATE_COOKIE_WAIT)) { struct sctp_gen_error_cause *cause; int len; @@ -5663,7 +5679,7 @@ sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length, if (op_err != NULL) { len = min(SCTP_SIZE32(chk_length), (uint32_t)(length - *offset)); cause = mtod(op_err, struct sctp_gen_error_cause *); - cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK); + cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK); cause->length = htons((uint16_t)(len + sizeof(struct sctp_gen_error_cause))); SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause); SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, len, M_NOWAIT); @@ -5687,7 +5703,6 @@ sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length, break; } /* switch (ch->chunk_type) */ - next_chunk: /* get the next chunk */ *offset += SCTP_SIZE32(chk_length); @@ -5696,7 +5711,7 @@ sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length, break; } ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, - sizeof(struct sctp_chunkhdr), chunk_buf); + sizeof(struct sctp_chunkhdr), chunk_buf); if (ch == NULL) { *offset = length; return (stcb); @@ -5709,7 +5724,6 @@ sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length, return (stcb); } - /* * common input chunk processing (v4 and v6) */ @@ -5719,7 +5733,7 @@ sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset, int lengt struct sctphdr *sh, struct sctp_chunkhdr *ch, uint8_t compute_crc, uint8_t ecn_bits, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum, #endif uint32_t vrf_id, uint16_t port) @@ -5771,7 +5785,7 @@ sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset, int lengt net->port = port; } #endif -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) if (net != NULL) { net->flowtype = mflowtype; net->flowid = mflowid; @@ -5813,25 +5827,25 @@ sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset, int lengt net->port = port; } #endif -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) if (net != NULL) { net->flowtype = mflowtype; net->flowid = mflowid; } #endif if (inp == NULL) { -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) SCTP_PROBE5(receive, NULL, stcb, m, stcb, sh); #endif SCTP_STAT_INCR(sctps_noport); -#if defined(__FreeBSD__) && (((__FreeBSD_version < 900000) && (__FreeBSD_version >= 804000)) || (__FreeBSD_version > 900000)) +#if defined(__FreeBSD__) && !defined(__Userspace__) if (badport_bandlim(BANDLIM_SCTP_OOTB) < 0) { goto out; } #endif if (ch->chunk_type == SCTP_SHUTDOWN_ACK) { sctp_send_shutdown_complete2(src, dst, sh, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) mflowtype, mflowid, fibnum, #endif vrf_id, port); @@ -5848,7 +5862,7 @@ sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset, int lengt "Out of the blue"); sctp_send_abort(m, iphlen, src, dst, sh, 0, op_err, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) mflowtype, mflowid, fibnum, #endif vrf_id, port); @@ -5877,14 +5891,14 @@ sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset, int lengt */ SCTP_TCB_UNLOCK(stcb); stcb = NULL; -#if 
defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) SCTP_PROBE5(receive, NULL, stcb, m, stcb, sh); #endif - snprintf(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__); + SCTP_SNPRINTF(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__); op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), msg); sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp, op_err, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) mflowtype, mflowid, inp->fibnum, #endif vrf_id, port); @@ -5911,7 +5925,7 @@ sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset, int lengt stcb = sctp_process_control(m, iphlen, &offset, length, src, dst, sh, ch, inp, stcb, &net, &fwd_tsn_seen, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) mflowtype, mflowid, fibnum, #endif vrf_id, port); @@ -5952,7 +5966,7 @@ sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset, int lengt if ((stcb != NULL) && sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks)) { /* "silently" ignore */ -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) SCTP_PROBE5(receive, NULL, stcb, m, stcb, sh); #endif SCTP_STAT_INCR(sctps_recvauthmissing); @@ -5960,14 +5974,14 @@ sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset, int lengt } if (stcb == NULL) { /* out of the blue DATA chunk */ -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) SCTP_PROBE5(receive, NULL, NULL, m, NULL, sh); #endif - snprintf(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__); + SCTP_SNPRINTF(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__); op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), msg); sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp, op_err, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) mflowtype, mflowid, fibnum, #endif vrf_id, port); @@ -5975,7 +5989,7 @@ sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset, int lengt } if (stcb->asoc.my_vtag != ntohl(sh->v_tag)) { /* v_tag mismatch! */ -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) SCTP_PROBE5(receive, NULL, stcb, m, stcb, sh); #endif SCTP_STAT_INCR(sctps_badvtag); @@ -5983,7 +5997,7 @@ sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset, int lengt } } -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) SCTP_PROBE5(receive, NULL, stcb, m, stcb, sh); #endif if (stcb == NULL) { @@ -6049,11 +6063,11 @@ sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset, int lengt /* * We consider OOTB any data sent during asoc setup. 
*/ - snprintf(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__); + SCTP_SNPRINTF(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__); op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), msg); sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp, op_err, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) mflowtype, mflowid, inp->fibnum, #endif vrf_id, port); @@ -6083,7 +6097,9 @@ sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset, int lengt stcb = NULL; goto out; } - data_processed = 1; + if (retval == 0) { + data_processed = 1; + } /* * Anything important needs to have been m_copy'ed in * process_data @@ -6131,9 +6147,8 @@ trigger_send: if (!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue) || cnt_ctrl_ready || stcb->asoc.trigger_reset || - ((un_sent) && - (stcb->asoc.peers_rwnd > 0 || - (stcb->asoc.peers_rwnd <= 0 && stcb->asoc.total_flight == 0)))) { + ((un_sent > 0) && + (stcb->asoc.peers_rwnd > 0 || stcb->asoc.total_flight == 0))) { SCTPDBG(SCTP_DEBUG_INPUT3, "Calling chunk OUTPUT\n"); sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED); SCTPDBG(SCTP_DEBUG_INPUT3, "chunk OUTPUT returns\n"); @@ -6171,21 +6186,8 @@ trigger_send: #ifdef INET #if !defined(__Userspace__) -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) void sctp_input_with_port(struct mbuf *i_pak, int off, uint16_t port) -#elif defined(__Panda__) -void -sctp_input(pakhandle_type i_pak) -#else -void -#if __STDC__ -sctp_input(struct mbuf *i_pak,...) -#else -sctp_input(i_pak, va_alist) - struct mbuf *i_pak; -#endif -#endif { struct mbuf *m; int iphlen; @@ -6197,30 +6199,21 @@ sctp_input(i_pak, va_alist) struct sctp_chunkhdr *ch; int length, offset; uint8_t compute_crc; -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) uint32_t mflowid; uint8_t mflowtype; uint16_t fibnum; #endif -#if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__)) +#if defined(__Userspace__) uint16_t port = 0; #endif -#if defined(__Panda__) - /* This is Evil, but its the only way to make panda work right. 
*/ - iphlen = sizeof(struct ip); -#else iphlen = off; -#endif if (SCTP_GET_PKT_VRFID(i_pak, vrf_id)) { SCTP_RELEASE_PKT(i_pak); return; } m = SCTP_HEADER_TO_CHAIN(i_pak); -#ifdef __Panda__ - SCTP_DETACH_HEADER_FROM_CHAIN(i_pak); - (void)SCTP_RELEASE_HEADER(i_pak); -#endif #ifdef SCTP_MBUF_LOGGING /* Log in any input mbufs */ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { @@ -6232,28 +6225,14 @@ sctp_input(i_pak, va_alist) sctp_packet_log(m); } #endif -#if defined(__FreeBSD__) -#if __FreeBSD_version > 1000049 +#if defined(__FreeBSD__) && !defined(__Userspace__) SCTPDBG(SCTP_DEBUG_CRCOFFLOAD, "sctp_input(): Packet of length %d received on %s with csum_flags 0x%b.\n", m->m_pkthdr.len, if_name(m->m_pkthdr.rcvif), (int)m->m_pkthdr.csum_flags, CSUM_BITS); -#elif __FreeBSD_version >= 800000 - SCTPDBG(SCTP_DEBUG_CRCOFFLOAD, - "sctp_input(): Packet of length %d received on %s with csum_flags 0x%x.\n", - m->m_pkthdr.len, - if_name(m->m_pkthdr.rcvif), - m->m_pkthdr.csum_flags); -#else - SCTPDBG(SCTP_DEBUG_CRCOFFLOAD, - "sctp_input(): Packet of length %d received on %s with csum_flags 0x%x.\n", - m->m_pkthdr.len, - m->m_pkthdr.rcvif->if_xname, - m->m_pkthdr.csum_flags); #endif -#endif -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) SCTPDBG(SCTP_DEBUG_CRCOFFLOAD, "sctp_input(): Packet of length %d received on %s%d with csum_flags 0x%x.\n", m->m_pkthdr.len, @@ -6261,14 +6240,14 @@ sctp_input(i_pak, va_alist) m->m_pkthdr.rcvif->if_unit, m->m_pkthdr.csum_flags); #endif -#if defined(__Windows__) +#if defined(_WIN32) && !defined(__Userspace__) SCTPDBG(SCTP_DEBUG_CRCOFFLOAD, "sctp_input(): Packet of length %d received on %s with csum_flags 0x%x.\n", m->m_pkthdr.len, m->m_pkthdr.rcvif->if_xname, m->m_pkthdr.csum_flags); #endif -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) mflowid = m->m_pkthdr.flowid; mflowtype = M_HASHTYPE_GET(m); fibnum = M_GETFIB(m); @@ -6301,26 +6280,22 @@ sctp_input(i_pak, va_alist) #endif dst.sin_port = sh->dest_port; dst.sin_addr = ip->ip_dst; -#if defined(__Windows__) +#if defined(_WIN32) && !defined(__Userspace__) NTOHS(ip->ip_len); #endif -#if defined(__Userspace_os_Linux) || defined(__Userspace_os_Windows) +#if defined(__linux__) || (defined(_WIN32) && defined(__Userspace__)) ip->ip_len = ntohs(ip->ip_len); #endif -#if defined(__FreeBSD__) -#if __FreeBSD_version >= 1000000 - length = ntohs(ip->ip_len); -#else - length = ip->ip_len + iphlen; -#endif -#elif defined(__APPLE__) - length = ip->ip_len + iphlen; -#elif defined(__Userspace__) -#if defined(__Userspace_os_Linux) || defined(__Userspace_os_Windows) +#if defined(__Userspace__) +#if defined(__linux__) || defined(_WIN32) length = ip->ip_len; #else length = ip->ip_len + iphlen; #endif +#elif defined(__FreeBSD__) + length = ntohs(ip->ip_len); +#elif defined(__APPLE__) + length = ip->ip_len + iphlen; #else length = ip->ip_len; #endif @@ -6339,7 +6314,7 @@ sctp_input(i_pak, va_alist) goto out; } ecn_bits = ip->ip_tos; -#if defined(__FreeBSD__) && __FreeBSD_version >= 800000 +#if defined(__FreeBSD__) && !defined(__Userspace__) if (m->m_pkthdr.csum_flags & CSUM_SCTP_VALID) { SCTP_STAT_INCR(sctps_recvhwcrc); compute_crc = 0; @@ -6361,7 +6336,7 @@ sctp_input(i_pak, va_alist) sh, ch, compute_crc, ecn_bits, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) mflowtype, mflowid, fibnum, #endif vrf_id, port); @@ -6372,11 +6347,13 @@ sctp_input(i_pak, va_alist) return; } -#if defined(__FreeBSD__) && defined(SCTP_MCORE_INPUT) && 
defined(SMP) +#if defined(__FreeBSD__) && !defined(__Userspace__) +#if defined(SCTP_MCORE_INPUT) && defined(SMP) extern int *sctp_cpuarry; #endif +#endif -#if defined(__FreeBSD__) && __FreeBSD_version >= 1100020 +#if defined(__FreeBSD__) && !defined(__Userspace__) int sctp_input(struct mbuf **mp, int *offp, int proto SCTP_UNUSED) { @@ -6390,7 +6367,8 @@ void sctp_input(struct mbuf *m, int off) { #endif -#if defined(__FreeBSD__) && defined(SCTP_MCORE_INPUT) && defined(SMP) +#if defined(__FreeBSD__) && !defined(__Userspace__) +#if defined(SCTP_MCORE_INPUT) && defined(SMP) if (mp_ncpus > 1) { struct ip *ip; struct sctphdr *sh; @@ -6408,11 +6386,7 @@ sctp_input(struct mbuf *m, int off) if (SCTP_BUF_LEN(m) < offset) { if ((m = m_pullup(m, offset)) == NULL) { SCTP_STAT_INCR(sctps_hdrops); -#if defined(__FreeBSD__) && __FreeBSD_version >= 1100020 return (IPPROTO_DONE); -#else - return; -#endif } } ip = mtod(m, struct ip *); @@ -6424,15 +6398,12 @@ sctp_input(struct mbuf *m, int off) } cpu_to_use = sctp_cpuarry[flowid % mp_ncpus]; sctp_queue_to_mcore(m, off, cpu_to_use); -#if defined(__FreeBSD__) && __FreeBSD_version >= 1100020 return (IPPROTO_DONE); -#else - return; -#endif } +#endif #endif sctp_input_with_port(m, off, 0); -#if defined(__FreeBSD__) && __FreeBSD_version >= 1100020 +#if defined(__FreeBSD__) && !defined(__Userspace__) return (IPPROTO_DONE); #endif } diff --git a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_input.h b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_input.h index 2d01beb08..47ae2127b 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_input.h +++ b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_input.h @@ -32,9 +32,9 @@ * THE POSSIBILITY OF SUCH DAMAGE. */ -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) #include -__FBSDID("$FreeBSD: head/sys/netinet/sctp_input.h 326672 2017-12-07 22:19:08Z tuexen $"); +__FBSDID("$FreeBSD: head/sys/netinet/sctp_input.h 365071 2020-09-01 21:19:14Z mjg $"); #endif #ifndef _NETINET_SCTP_INPUT_H_ @@ -47,7 +47,7 @@ sctp_common_input_processing(struct mbuf **, int, int, int, struct sctphdr *, struct sctp_chunkhdr *, uint8_t, uint8_t, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) uint8_t, uint32_t, uint16_t, #endif uint32_t, uint16_t); @@ -56,9 +56,9 @@ struct sctp_stream_reset_request * sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq, struct sctp_tmit_chunk **bchk); -void sctp_reset_in_stream(struct sctp_tcb *stcb, uint32_t number_entries, - uint16_t *list); - +void +sctp_reset_in_stream(struct sctp_tcb *stcb, uint32_t number_entries, + uint16_t *list); int sctp_is_there_unsent_data(struct sctp_tcb *stcb, int so_locked); diff --git a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_lock_userspace.h b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_lock_userspace.h index 7b8b0cbdd..afdbb95af 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_lock_userspace.h +++ b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_lock_userspace.h @@ -33,7 +33,7 @@ * THE POSSIBILITY OF SUCH DAMAGE. 
*/ -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) #include __FBSDID("$FreeBSD$"); #endif @@ -63,12 +63,15 @@ __FBSDID("$FreeBSD$"); #define SCTP_INP_INFO_TRYLOCK() 1 #define SCTP_INP_INFO_RUNLOCK() #define SCTP_INP_INFO_WUNLOCK() +#define SCTP_INP_INFO_LOCK_ASSERT() +#define SCTP_INP_INFO_RLOCK_ASSERT() +#define SCTP_INP_INFO_WLOCK_ASSERT() #define SCTP_WQ_ADDR_INIT() #define SCTP_WQ_ADDR_DESTROY() #define SCTP_WQ_ADDR_LOCK() #define SCTP_WQ_ADDR_UNLOCK() - +#define SCTP_WQ_ADDR_LOCK_ASSERT() #define SCTP_IPI_ADDR_INIT() #define SCTP_IPI_ADDR_DESTROY() @@ -76,20 +79,19 @@ __FBSDID("$FreeBSD$"); #define SCTP_IPI_ADDR_WLOCK() #define SCTP_IPI_ADDR_RUNLOCK() #define SCTP_IPI_ADDR_WUNLOCK() +#define SCTP_IPI_ADDR_LOCK_ASSERT() +#define SCTP_IPI_ADDR_WLOCK_ASSERT() #define SCTP_IPI_ITERATOR_WQ_INIT() #define SCTP_IPI_ITERATOR_WQ_DESTROY() #define SCTP_IPI_ITERATOR_WQ_LOCK() #define SCTP_IPI_ITERATOR_WQ_UNLOCK() - #define SCTP_IP_PKTLOG_INIT() #define SCTP_IP_PKTLOG_LOCK() #define SCTP_IP_PKTLOG_UNLOCK() #define SCTP_IP_PKTLOG_DESTROY() - - #define SCTP_INP_READ_INIT(_inp) #define SCTP_INP_READ_DESTROY(_inp) #define SCTP_INP_READ_LOCK(_inp) @@ -100,9 +102,10 @@ __FBSDID("$FreeBSD$"); #define SCTP_INP_LOCK_DESTROY(_inp) #define SCTP_ASOC_CREATE_LOCK_DESTROY(_inp) - #define SCTP_INP_RLOCK(_inp) #define SCTP_INP_WLOCK(_inp) +#define SCTP_INP_RLOCK_ASSERT(_inp) +#define SCTP_INP_WLOCK_ASSERT(_inp) #define SCTP_INP_LOCK_CONTENDED(_inp) (0) /* Don't know if this is possible */ @@ -214,14 +217,6 @@ __FBSDID("$FreeBSD$"); } while (0) -/* not sure if __Userspace__ needs these (but copied nonetheless...) */ -#if defined(SCTP_SO_LOCK_TESTING) -#define SCTP_INP_SO(sctpinp) (sctpinp)->ip_inp.inp.inp_socket -#define SCTP_SOCKET_LOCK(so, refcnt) -#define SCTP_SOCKET_UNLOCK(so, refcnt) -#endif - - /* these were in sctp_lock_empty.h but aren't in sctp_lock_bsd.h ... */ #if 0 #define SCTP_IPI_ADDR_LOCK() diff --git a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_os.h b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_os.h index b3746dd94..0e03ce9e5 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_os.h +++ b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_os.h @@ -32,9 +32,9 @@ * THE POSSIBILITY OF SUCH DAMAGE. 
*/ -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) #include -__FBSDID("$FreeBSD: head/sys/netinet/sctp_os.h 235828 2012-05-23 11:26:28Z tuexen $"); +__FBSDID("$FreeBSD: head/sys/netinet/sctp_os.h 365071 2020-09-01 21:19:14Z mjg $"); #endif #ifndef _NETINET_SCTP_OS_H_ @@ -64,25 +64,18 @@ __FBSDID("$FreeBSD: head/sys/netinet/sctp_os.h 235828 2012-05-23 11:26:28Z tuexe * SCTP_ZONE_DESTROY(zone) */ -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) #include #else #define MODULE_GLOBAL(_B) (_B) #endif - #if defined(__Userspace__) #include #endif - -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) #include #endif - -#if defined(__Panda__) -#include -#endif - -#if defined(__Windows__) +#if defined(_WIN32) && !defined(__Userspace__) #include #endif diff --git a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_os_userspace.h b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_os_userspace.h index f09cb8da2..46b618110 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_os_userspace.h +++ b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_os_userspace.h @@ -43,7 +43,7 @@ #include -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) #include #include #include @@ -83,28 +83,24 @@ typedef HANDLE userland_thread_t; #if !defined(_MSC_VER) || (_MSC_VER >= 1600) #include #else -#define uint64_t unsigned __int64 -#define uint32_t unsigned __int32 -#define int32_t __int32 -#define uint16_t unsigned __int16 -#define int16_t __int16 -#define uint8_t unsigned __int8 -#define int8_t __int8 +typedef unsigned __int64 uint64_t; +typedef unsigned __int32 uint32_t; +typedef __int32 int32_t; +typedef unsigned __int16 uint16_t; +typedef __int16 int16_t; +typedef unsigned __int8 uint8_t; +typedef __int8 int8_t; #endif #ifndef _SIZE_T_DEFINED -#define size_t __int32 +#typedef __int32 size_t; +#endif +typedef unsigned __int32 u_int; +typedef unsigned char u_char; +typedef unsigned __int16 u_short; +typedef unsigned __int8 sa_family_t; +#ifndef _SSIZE_T_DEFINED +typedef __int64 ssize_t; #endif -#define u_long unsigned __int64 -#define u_int unsigned __int32 -#define u_int32_t unsigned __int32 -#define u_int16_t unsigned __int16 -#define u_int8_t unsigned __int8 -#define u_char unsigned char -#define n_short unsigned __int16 -#define u_short unsigned __int16 -#define n_time unsigned __int32 -#define sa_family_t unsigned __int8 -#define ssize_t __int64 #if !defined(__MINGW32__) #define __func__ __FUNCTION__ #endif @@ -221,9 +217,19 @@ typedef char* caddr_t; #define bzero(buf, len) memset(buf, 0, len) #define bcopy(srcKey, dstKey, len) memcpy(dstKey, srcKey, len) + #if defined(_MSC_VER) && (_MSC_VER < 1900) && !defined(__MINGW32__) -#define snprintf(data, size, format, ...) _snprintf_s(data, size, _TRUNCATE, format, __VA_ARGS__) +#define SCTP_SNPRINTF(data, size, format, ...) \ + if (_snprintf_s(data, size, _TRUNCATE, format, __VA_ARGS__) < 0) { \ + data[0] = '\0'; \ + } +#else +#define SCTP_SNPRINTF(data, ...) 
\ + if (snprintf(data, __VA_ARGS__) < 0 ) { \ + data[0] = '\0'; \ + } #endif + #define inline __inline #define __inline__ __inline #define MSG_EOR 0x8 /* data completes record */ @@ -277,15 +283,19 @@ typedef char* caddr_t; #else /* !defined(Userspace_os_Windows) */ #include -#if defined(__Userspace_os_DragonFly) || defined(__Userspace_os_FreeBSD) || defined(__Userspace_os_Linux) || defined(__Userspace_os_NetBSD) || defined(__Userspace_os_OpenBSD) || defined(__Userspace_os_NaCl) || defined(__Userspace_os_Fuchsia) -#include + +#if defined(__EMSCRIPTEN__) && !defined(__EMSCRIPTEN_PTHREADS__) +#error "Unsupported build configuration." #endif + +#include + typedef pthread_mutex_t userland_mutex_t; typedef pthread_cond_t userland_cond_t; typedef pthread_t userland_thread_t; #endif -#if defined(__Userspace_os_Windows) || defined(__Userspace_os_NaCl) +#if defined(_WIN32) || defined(__native_client__) #define IFNAMSIZ 64 @@ -379,43 +389,9 @@ struct ifkpi { int ifk_value; } ifk_data; }; - -struct ifreq { - char ifr_name[16]; - union { - struct sockaddr ifru_addr; - struct sockaddr ifru_dstaddr; - struct sockaddr ifru_broadaddr; - short ifru_flags; - int ifru_metric; - int ifru_mtu; - int ifru_phys; - int ifru_media; - int ifru_intval; - char* ifru_data; - struct ifdevmtu ifru_devmtu; - struct ifkpi ifru_kpi; - uint32_t ifru_wake_flags; - } ifr_ifru; -#define ifr_addr ifr_ifru.ifru_addr -#define ifr_dstaddr ifr_ifru.ifru_dstaddr -#define ifr_broadaddr ifr_ifru.ifru_broadaddr -#define ifr_flags ifr_ifru.ifru_flags[0] -#define ifr_prevflags ifr_ifru.ifru_flags[1] -#define ifr_metric ifr_ifru.ifru_metric -#define ifr_mtu ifr_ifru.ifru_mtu -#define ifr_phys ifr_ifru.ifru_phys -#define ifr_media ifr_ifru.ifru_media -#define ifr_data ifr_ifru.ifru_data -#define ifr_devmtu ifr_ifru.ifru_devmtu -#define ifr_intval ifr_ifru.ifru_intval -#define ifr_kpi ifr_ifru.ifru_kpi -#define ifr_wake_flags ifr_ifru.ifru_wake_flags -}; - #endif -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) int Win_getifaddrs(struct ifaddrs**); #define getifaddrs(interfaces) (int)Win_getifaddrs(interfaces) int win_if_nametoindex(const char *); @@ -426,9 +402,9 @@ int win_if_nametoindex(const char *); #define mtx_unlock(arg1) #define mtx_assert(arg1,arg2) #define MA_OWNED 7 /* sys/mutex.h typically on FreeBSD */ -#if !defined(__Userspace_os_FreeBSD) +#if !defined(__FreeBSD__) struct mtx {int dummy;}; -#if !defined(__Userspace_os_NetBSD) +#if !defined(__NetBSD__) struct selinfo {int dummy;}; #endif struct sx {int dummy;}; @@ -436,6 +412,7 @@ struct sx {int dummy;}; #include #include +#include /* #include in FreeBSD defines MSIZE */ /* #include */ /* #include */ @@ -458,22 +435,22 @@ struct sx {int dummy;}; #include /* #include */ /* #include */ -#if defined(__FreeBSD__) && __FreeBSD_version > 602000 +#if defined(__FreeBSD__) && !defined(__Userspace__) #include #endif /* #include */ -#if defined(__FreeBSD__) && __FreeBSD_version > 602000 +#if defined(__FreeBSD__) && !defined(__Userspace__) #include #endif /* #include */ #include /* #include */ -#if defined(__Userspace_os_Darwin) +#if defined(__APPLE__) /* was a 0 byte file. needed for structs if_data(64) and net_event_data */ #include #endif -#if defined(__Userspace_os_FreeBSD) +#if defined(__FreeBSD__) #include /* #include was a 0 byte file. causes struct mtx redefinition */ #endif @@ -481,7 +458,7 @@ struct sx {int dummy;}; * userspace as well? 
*/ /* on FreeBSD, this results in a redefintion of struct route */ /* #include */ -#if !defined(__Userspace_os_Windows) && !defined(__Userspace_os_NaCl) +#if !defined(_WIN32) && !defined(__native_client__) #include #include #include @@ -497,7 +474,7 @@ struct sx {int dummy;}; /* for getifaddrs */ #include -#if !defined(__Userspace_os_Windows) +#if !defined(_WIN32) #if defined(INET) || defined(INET6) #include #endif @@ -507,6 +484,8 @@ struct sx {int dummy;}; /* for close, etc. */ #include +/* for gettimeofday */ +#include #endif /* lots of errno's used and needed in userspace */ @@ -514,7 +493,7 @@ struct sx {int dummy;}; /* for offsetof */ #include -#if defined(SCTP_PROCESS_LEVEL_LOCKS) && !defined(__Userspace_os_Windows) +#if defined(SCTP_PROCESS_LEVEL_LOCKS) && !defined(_WIN32) /* for pthread_mutex_lock, pthread_mutex_unlock, etc. */ #include #endif @@ -525,21 +504,21 @@ struct sx {int dummy;}; #endif /* IPSEC */ #ifdef INET6 -#if defined(__Userspace_os_FreeBSD) +#if defined(__FreeBSD__) #include #endif #ifdef IPSEC #include #endif -#if !defined(__Userspace_os_Windows) +#if !defined(_WIN32) #include #endif -#if defined(__Userspace_os_Darwin) || defined(__Userspace_os_FreeBSD) || defined(__Userspace_os_Linux) || defined(__Userspace_os_NetBSD) || defined(__Userspace_os_OpenBSD) || defined(__Userspace_os_Windows) +#if defined(__APPLE__) || defined(__FreeBSD__) || defined(__linux__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(_WIN32) || defined(__EMSCRIPTEN__) #include "user_ip6_var.h" #else #include #endif -#if defined(__Userspace_os_FreeBSD) +#if defined(__FreeBSD__) #include #include /* #include was a 0 byte file */ @@ -554,7 +533,7 @@ struct sx {int dummy;}; #include "netinet/sctp_sha1.h" -#if __FreeBSD_version >= 700000 +#if defined(__FreeBSD__) && !defined(__Userspace__) #include #endif @@ -607,12 +586,12 @@ MALLOC_DECLARE(SCTP_M_SOCKOPT); /* * */ -#if !defined(__Userspace_os_Darwin) +#if !defined(__APPLE__) #define USER_ADDR_NULL (NULL) /* FIX ME: temp */ #endif -#if defined(SCTP_DEBUG) #include +#if defined(SCTP_DEBUG) #define SCTPDBG(level, ...) \ { \ do { \ @@ -666,7 +645,7 @@ MALLOC_DECLARE(SCTP_M_SOCKOPT); #define SCTP_VRF_IFN_HASH_SIZE 3 #define SCTP_INIT_VRF_TABLEID(vrf) -#if !defined(__Userspace_os_Windows) +#if !defined(_WIN32) #define SCTP_IFN_IS_IFT_LOOP(ifn) (strncmp((ifn)->ifn_name, "lo", 2) == 0) /* BSD definition */ /* #define SCTP_ROUTE_IS_REAL_LOOP(ro) ((ro)->ro_rt && (ro)->ro_rt->rt_ifa && (ro)->ro_rt->rt_ifa->ifa_ifp && (ro)->ro_rt->rt_ifa->ifa_ifp->if_type == IFT_LOOP) */ @@ -800,7 +779,7 @@ sctp_hashfreedestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask); #define KTR_SUBSYS 1 /* The packed define for 64 bit platforms */ -#if !defined(__Userspace_os_Windows) +#if !defined(_WIN32) #define SCTP_PACKED __attribute__((packed)) #define SCTP_UNUSED __attribute__((unused)) #else @@ -829,6 +808,13 @@ sctp_hashfreedestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask); M_ALIGN(m, len); \ } +#if !defined(_WIN32) +#define SCTP_SNPRINTF(data, ...) 
\ + if (snprintf(data, __VA_ARGS__) < 0) { \ + data[0] = '\0'; \ + } +#endif + /* We make it so if you have up to 4 threads * writting based on the default size of * the packet log 65 k, that would be @@ -887,7 +873,6 @@ static inline void sctp_userspace_rtfree(sctp_rtentry_t *rt) return; } free(rt); - rt = NULL; } #define rtfree(arg1) sctp_userspace_rtfree(arg1) @@ -901,17 +886,13 @@ int sctp_userspace_get_mtu_from_ifn(uint32_t if_index, int af); #define SCTP_GATHER_MTU_FROM_ROUTE(sctp_ifa, sa, rt) ((rt != NULL) ? rt->rt_rmx.rmx_mtu : 0) -#define SCTP_GATHER_MTU_FROM_INTFC(sctp_ifn) sctp_userspace_get_mtu_from_ifn(if_nametoindex(((struct ifaddrs *) (sctp_ifn))->ifa_name), AF_INET) +#define SCTP_GATHER_MTU_FROM_INTFC(sctp_ifn) (sctp_ifn->ifn_mtu) #define SCTP_SET_MTU_OF_ROUTE(sa, rt, mtu) do { \ if (rt != NULL) \ rt->rt_rmx.rmx_mtu = mtu; \ } while(0) -/* (de-)register interface event notifications */ -#define SCTP_REGISTER_INTERFACE(ifhandle, af) -#define SCTP_DEREGISTER_INTERFACE(ifhandle, af) - /*************************/ /* These are for logging */ @@ -961,7 +942,7 @@ int sctp_userspace_get_mtu_from_ifn(uint32_t if_index, int af); */ /* get the v6 hop limit */ -#define SCTP_GET_HLIM(inp, ro) 128 /* As done for __Windows__ */ +#define SCTP_GET_HLIM(inp, ro) 128 #define IPv6_HOP_LIMIT 128 /* is the endpoint v6only? */ @@ -1000,7 +981,7 @@ int sctp_userspace_get_mtu_from_ifn(uint32_t if_index, int af); /* sctp_pcb.h */ -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) #define SHUT_RD 1 #define SHUT_WR 2 #define SHUT_RDWR 3 @@ -1087,12 +1068,12 @@ sctp_get_mbuf_for_msg(unsigned int space_needed, int want_header, int how, int a /* with the current included files, this is defined in Linux but * in FreeBSD, it is behind a _KERNEL in sys/socket.h ... */ -#if defined(__Userspace_os_DragonFly) || defined(__Userspace_os_FreeBSD) || defined(__Userspace_os_OpenBSD) || defined(__Userspace_os_NaCl) +#if defined(__DragonFly__) || defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__native_client__) /* stolen from /usr/include/sys/socket.h */ #define CMSG_ALIGN(n) _ALIGN(n) -#elif defined(__Userspace_os_NetBSD) +#elif defined(__NetBSD__) #define CMSG_ALIGN(n) (((n) + __ALIGNBYTES) & ~__ALIGNBYTES) -#elif defined(__Userspace_os_Darwin) +#elif defined(__APPLE__) #if !defined(__DARWIN_ALIGNBYTES) #define __DARWIN_ALIGNBYTES (sizeof(__darwin_size_t) - 1) #endif @@ -1127,7 +1108,7 @@ sctp_get_mbuf_for_msg(unsigned int space_needed, int want_header, int how, int a } while (0) #endif -#if defined(__Userspace_os_Linux) +#if defined(__linux__) #if !defined(TAILQ_FOREACH_SAFE) #define TAILQ_FOREACH_SAFE(var, head, field, tvar) \ for ((var) = ((head)->tqh_first); \ @@ -1141,12 +1122,12 @@ sctp_get_mbuf_for_msg(unsigned int space_needed, int want_header, int how, int a (var) = (tvar)) #endif #endif -#if defined(__Userspace_os_DragonFly) +#if defined(__DragonFly__) #define TAILQ_FOREACH_SAFE TAILQ_FOREACH_MUTABLE #define LIST_FOREACH_SAFE LIST_FOREACH_MUTABLE #endif -#if defined(__Userspace_os_NaCl) +#if defined(__native_client__) #define timercmp(tvp, uvp, cmp) \ (((tvp)->tv_sec == (uvp)->tv_sec) ? 
\ ((tvp)->tv_usec cmp (uvp)->tv_usec) : \ @@ -1155,7 +1136,7 @@ sctp_get_mbuf_for_msg(unsigned int space_needed, int want_header, int how, int a #define SCTP_IS_LISTENING(inp) ((inp->sctp_flags & SCTP_PCB_FLAGS_ACCEPTING) != 0) -#if defined(__Userspace_os_Darwin) || defined(__Userspace_os_DragonFly) || defined(__Userspace_os_Linux) || defined(__Userspace_os_NaCl) || defined(__Userspace_os_NetBSD) || defined(__Userspace_os_Windows) || defined(__Userspace_os_Fuchsia) +#if defined(__APPLE__) || defined(__DragonFly__) || defined(__linux__) || defined(__native_client__) || defined(__NetBSD__) || defined(_WIN32) || defined(__Fuchsia__) || defined(__EMSCRIPTEN__) int timingsafe_bcmp(const void *, const void *, size_t); #endif diff --git a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_output.c b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_output.c index acc7307b9..db73927fe 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_output.c +++ b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_output.c @@ -32,13 +32,13 @@ * THE POSSIBILITY OF SUCH DAMAGE. */ -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) #include -__FBSDID("$FreeBSD: head/sys/netinet/sctp_output.c 357197 2020-01-28 10:09:05Z tuexen $"); +__FBSDID("$FreeBSD$"); #endif #include -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) #include #endif #include @@ -56,35 +56,30 @@ __FBSDID("$FreeBSD: head/sys/netinet/sctp_output.c 357197 2020-01-28 10:09:05Z t #include #include #include -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) #include #endif -#if defined(__Userspace_os_Linux) +#if defined(__linux__) #define __FAVOR_BSD /* (on Ubuntu at least) enables UDP header field names like BSD in RFC 768 */ #endif #if defined(INET) || defined(INET6) -#if !defined(__Userspace_os_Windows) +#if !defined(_WIN32) #include #endif #endif +#if !defined(__Userspace__) #if defined(__APPLE__) #include #endif -#if defined(__FreeBSD__) -#if defined(__FreeBSD__) && __FreeBSD_version >= 800000 +#if defined(__FreeBSD__) && !defined(__Userspace__) #include -#endif #include #endif +#endif #if defined(__Userspace__) && defined(INET6) #include #endif - -#if defined(__APPLE__) -#define APPLE_FILE_NO 3 -#endif - -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) #if !(defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)) #define SCTP_MAX_LINKHDR 16 #endif @@ -1894,7 +1889,6 @@ const struct sack_track sack_array[256] = { } }; - int sctp_is_address_in_scope(struct sctp_ifa *ifa, struct sctp_scoping *scope, @@ -1933,14 +1927,12 @@ sctp_is_address_in_scope(struct sctp_ifa *ifa, if (scope->ipv6_addr_legal) { struct sockaddr_in6 *sin6; -#if !defined(__Panda__) /* Must update the flags, bummer, which * means any IFA locks must now be applied HERE <-> */ if (do_update) { sctp_gather_internal_ifa_flags(ifa); } -#endif if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) { return (0); } @@ -2065,7 +2057,6 @@ sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa, uint16_t *len) #endif } - struct mbuf * sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_scoping *scope, @@ -2103,7 +2094,7 @@ sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_tcb *stcb, continue; } LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) { -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) #ifdef INET if ((sctp_ifap->address.sa.sa_family == AF_INET) && 
(prison_check_ip4(inp->ip_inp.inp.inp_cred, @@ -2153,7 +2144,7 @@ sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_tcb *stcb, continue; } LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) { -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) #ifdef INET if ((sctp_ifap->address.sa.sa_family == AF_INET) && (prison_check_ip4(inp->ip_inp.inp.inp_cred, @@ -2486,7 +2477,6 @@ sctp_is_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa) return (0); } - int sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa) { @@ -2507,8 +2497,6 @@ sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa) return (0); } - - static struct sctp_ifa * sctp_choose_boundspecific_inp(struct sctp_inpcb *inp, sctp_route_t *ro, @@ -2541,7 +2529,7 @@ sctp_choose_boundspecific_inp(struct sctp_inpcb *inp, if (sctp_ifn) { /* is a preferred one on the interface we route out? */ LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) #ifdef INET if ((sctp_ifa->address.sa.sa_family == AF_INET) && (prison_check_ip4(inp->ip_inp.inp.inp_cred, @@ -2643,8 +2631,6 @@ sctp_choose_boundspecific_inp(struct sctp_inpcb *inp, return (NULL); } - - static struct sctp_ifa * sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp, struct sctp_tcb *stcb, @@ -2673,7 +2659,7 @@ sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp, ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro); ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro); - sctp_ifn = sctp_find_ifn( ifn, ifn_index); + sctp_ifn = sctp_find_ifn(ifn, ifn_index); /* * first question, is the ifn we will emit on in our list? If so, @@ -2683,7 +2669,7 @@ sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp, if (sctp_ifn) { /* first try for a preferred address on the ep */ LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) #ifdef INET if ((sctp_ifa->address.sa.sa_family == AF_INET) && (prison_check_ip4(inp->ip_inp.inp.inp_cred, @@ -2719,7 +2705,7 @@ sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp, } /* next try for an acceptable address on the ep */ LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) #ifdef INET if ((sctp_ifa->address.sa.sa_family == AF_INET) && (prison_check_ip4(inp->ip_inp.inp.inp_cred, @@ -2753,7 +2739,6 @@ sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp, return (sifa); } } - } /* * if we can't find one like that then we must look at all @@ -2840,19 +2825,18 @@ sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp, static struct sctp_ifa * sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) struct sctp_inpcb *inp, #else struct sctp_inpcb *inp SCTP_UNUSED, #endif - struct sctp_tcb *stcb, - int non_asoc_addr_ok, - uint8_t dest_is_loop, - uint8_t dest_is_priv, - int addr_wanted, - sa_family_t fam, - sctp_route_t *ro - ) + struct sctp_tcb *stcb, + int non_asoc_addr_ok, + uint8_t dest_is_loop, + uint8_t dest_is_priv, + int addr_wanted, + sa_family_t fam, + sctp_route_t *ro) { struct sctp_ifa *ifa, *sifa; int num_eligible_addr = 0; @@ -2871,7 +2855,7 @@ sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn, #endif /* SCTP_EMBEDDED_V6_SCOPE */ #endif /* INET6 */ LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) { -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && 
!defined(__Userspace__) #ifdef INET if ((ifa->address.sa.sa_family == AF_INET) && (prison_check_ip4(inp->ip_inp.inp.inp_cred, @@ -2971,10 +2955,9 @@ sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn, return (NULL); } - static int sctp_count_num_preferred_boundall(struct sctp_ifn *ifn, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) struct sctp_inpcb *inp, #else struct sctp_inpcb *inp SCTP_UNUSED, @@ -2989,7 +2972,7 @@ sctp_count_num_preferred_boundall(struct sctp_ifn *ifn, int num_eligible_addr = 0; LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) { -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) #ifdef INET if ((ifa->address.sa.sa_family == AF_INET) && (prison_check_ip4(inp->ip_inp.inp.inp_cred, @@ -3199,7 +3182,7 @@ again_with_private_addresses_allowed: } LIST_FOREACH(sctp_ifa, &emit_ifn->ifalist, next_ifa) { SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifa:%p\n", (void *)sctp_ifa); -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) #ifdef INET if ((sctp_ifa->address.sa.sa_family == AF_INET) && (prison_check_ip4(inp->ip_inp.inp.inp_cred, @@ -3265,7 +3248,7 @@ again_with_private_addresses_allowed: continue; } LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) #ifdef INET if ((sctp_ifa->address.sa.sa_family == AF_INET) && (prison_check_ip4(inp->ip_inp.inp.inp_cred, @@ -3333,7 +3316,7 @@ out: LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { struct sctp_ifa *tmp_sifa; -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) #ifdef INET if ((sctp_ifa->address.sa.sa_family == AF_INET) && (prison_check_ip4(inp->ip_inp.inp.inp_cred, @@ -3391,8 +3374,6 @@ out: return (sifa); } - - /* tcb may be NULL */ struct sctp_ifa * sctp_source_address_selection(struct sctp_inpcb *inp, @@ -3474,16 +3455,24 @@ sctp_source_address_selection(struct sctp_inpcb *inp, * addresses. If the bound set is NOT assigned to the interface then * we must use rotation amongst the bound addresses.. */ +#if defined(__FreeBSD__) && !defined(__Userspace__) + if (ro->ro_nh == NULL) { +#else if (ro->ro_rt == NULL) { +#endif /* * Need a route to cache. 
*/ SCTP_RTALLOC(ro, vrf_id, inp->fibnum); } +#if defined(__FreeBSD__) && !defined(__Userspace__) + if (ro->ro_nh == NULL) { +#else if (ro->ro_rt == NULL) { +#endif return (NULL); } -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) /* On Windows the sa_family is U_SHORT or ADDRESS_FAMILY */ fam = (sa_family_t)ro->ro_dst.sa_family; #else @@ -3509,7 +3498,7 @@ sctp_source_address_selection(struct sctp_inpcb *inp, #ifdef INET6 case AF_INET6: /* Scope based on outbound address */ -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr)) { #else if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr) || @@ -3566,7 +3555,7 @@ sctp_source_address_selection(struct sctp_inpcb *inp, static int sctp_find_cmsg(int c_type, void *data, struct mbuf *control, size_t cpsize) { -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) WSACMSGHDR cmh; #else struct cmsghdr cmh; @@ -3671,7 +3660,7 @@ sctp_find_cmsg(int c_type, void *data, struct mbuf *control, size_t cpsize) static int sctp_process_cmsgs_for_init(struct sctp_tcb *stcb, struct mbuf *control, int *error) { -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) WSACMSGHDR cmh; #else struct cmsghdr cmh; @@ -3747,9 +3736,8 @@ sctp_process_cmsgs_for_init(struct sctp_tcb *stcb, struct mbuf *control, int *er } for (i = 0; i < stcb->asoc.streamoutcnt; i++) { TAILQ_INIT(&stcb->asoc.strmout[i].outqueue); + stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], NULL); stcb->asoc.strmout[i].chunks_on_queues = 0; - stcb->asoc.strmout[i].next_mid_ordered = 0; - stcb->asoc.strmout[i].next_mid_unordered = 0; #if defined(SCTP_DETAILED_STR_STATS) for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) { stcb->asoc.strmout[i].abandoned_sent[j] = 0; @@ -3759,10 +3747,11 @@ sctp_process_cmsgs_for_init(struct sctp_tcb *stcb, struct mbuf *control, int *er stcb->asoc.strmout[i].abandoned_sent[0] = 0; stcb->asoc.strmout[i].abandoned_unsent[0] = 0; #endif + stcb->asoc.strmout[i].next_mid_ordered = 0; + stcb->asoc.strmout[i].next_mid_unordered = 0; stcb->asoc.strmout[i].sid = i; stcb->asoc.strmout[i].last_msg_incomplete = 0; stcb->asoc.strmout[i].state = SCTP_STREAM_OPENING; - stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], NULL); } } break; @@ -3849,7 +3838,7 @@ sctp_findassociation_cmsgs(struct sctp_inpcb **inp_p, struct sctp_nets **net_p, int *error) { -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) WSACMSGHDR cmh; #else struct cmsghdr cmh; @@ -3948,8 +3937,6 @@ sctp_add_cookie(struct mbuf *init, int init_offset, struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret; struct sctp_state_cookie *stc; struct sctp_paramhdr *ph; - uint8_t *foo; - int sig_offset; uint16_t cookie_sz; mret = sctp_get_mbuf_for_msg((sizeof(struct sctp_state_cookie) + @@ -4013,25 +4000,21 @@ sctp_add_cookie(struct mbuf *init, int init_offset, break; } } - sig = sctp_get_mbuf_for_msg(SCTP_SECRET_SIZE, 0, M_NOWAIT, 1, MT_DATA); + sig = sctp_get_mbuf_for_msg(SCTP_SIGNATURE_SIZE, 0, M_NOWAIT, 1, MT_DATA); if (sig == NULL) { /* no space, so free the entire chain */ sctp_m_freem(mret); return (NULL); } - SCTP_BUF_LEN(sig) = 0; SCTP_BUF_NEXT(m_at) = sig; - sig_offset = 0; - foo = (uint8_t *) (mtod(sig, caddr_t) + sig_offset); - memset(foo, 0, SCTP_SIGNATURE_SIZE); - *signature = foo; - SCTP_BUF_LEN(sig) += SCTP_SIGNATURE_SIZE; + SCTP_BUF_LEN(sig) = SCTP_SIGNATURE_SIZE; cookie_sz += SCTP_SIGNATURE_SIZE; ph->param_length = htons(cookie_sz); + *signature = (uint8_t *)mtod(sig, caddr_t); + memset(*signature, 0, 
SCTP_SIGNATURE_SIZE); return (mret); } - static uint8_t sctp_get_ect(struct sctp_tcb *stcb) { @@ -4105,15 +4088,10 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp, uint32_t v_tag, uint16_t port, union sctp_sockstore *over_addr, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) uint8_t mflowtype, uint32_t mflowid, #endif -#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) - int so_locked SCTP_UNUSED -#else - int so_locked -#endif - ) +int so_locked) /* nofragment_flag to tell if IP_DF should be set (IPv4 only) */ { /** @@ -4129,9 +4107,6 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp, * interface and smallest_mtu size as well. */ /* Will need ifdefs around this */ -#ifdef __Panda__ - pakhandle_type o_pak; -#endif struct mbuf *newm; struct sctphdr *sctphdr; int packet_length; @@ -4140,18 +4115,16 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp, uint32_t vrf_id; #endif #if defined(INET) || defined(INET6) -#if !defined(__Panda__) struct mbuf *o_pak; -#endif sctp_route_t *ro = NULL; struct udphdr *udp = NULL; #endif uint8_t tos_value; -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) struct socket *so = NULL; #endif -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) if (so_locked) { sctp_lock_assert(SCTP_INP_SO(inp)); SCTP_TCB_LOCK_ASSERT(stcb); @@ -4206,7 +4179,7 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp, SCTP_BUF_LEN(newm) = len; SCTP_BUF_NEXT(newm) = m; m = newm; -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) if (net != NULL) { m->m_pkthdr.flowid = net->flowid; M_HASHTYPE_SET(m, net->flowtype); @@ -4224,36 +4197,30 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp, * This means especially, that it is not set at the * SCTP layer. So use the value from the IP layer. 
*/ -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Panda__) || defined(__Windows__) || defined(__Userspace__) tos_value = inp->ip_inp.inp.inp_ip_tos; -#else - tos_value = inp->inp_ip_tos; -#endif } tos_value &= 0xfc; if (ecn_ok) { tos_value |= sctp_get_ect(stcb); } if ((nofragment_flag) && (port == 0)) { -#if defined(__FreeBSD__) -#if __FreeBSD_version >= 1000000 +#if defined(__FreeBSD__) && !defined(__Userspace__) ip->ip_off = htons(IP_DF); -#else - ip->ip_off = IP_DF; -#endif -#elif defined(WITH_CONVERT_IP_OFF) || defined(__APPLE__) || defined(__Userspace_os_Darwin) +#elif defined(WITH_CONVERT_IP_OFF) || defined(__APPLE__) ip->ip_off = IP_DF; #else ip->ip_off = htons(IP_DF); #endif } else { -#if defined(__FreeBSD__) && __FreeBSD_version >= 1000000 +#if defined(__FreeBSD__) && !defined(__Userspace__) ip->ip_off = htons(0); #else ip->ip_off = 0; #endif } -#if defined(__FreeBSD__) +#if defined(__Userspace__) + ip->ip_id = htons(SCTP_IP_ID(inp)++); +#elif defined(__FreeBSD__) /* FreeBSD has a function for ip_id's */ ip_fillid(ip); #elif defined(__APPLE__) @@ -4262,18 +4229,12 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp, #else ip->ip_id = htons(ip_id++); #endif -#elif defined(__Userspace__) - ip->ip_id = htons(SCTP_IP_ID(inp)++); #else ip->ip_id = SCTP_IP_ID(inp)++; #endif -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Panda__) || defined(__Windows__) || defined(__Userspace__) ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl; -#else - ip->ip_ttl = inp->inp_ip_ttl; -#endif -#if defined(__FreeBSD__) && __FreeBSD_version >= 1000000 +#if defined(__FreeBSD__) && !defined(__Userspace__) ip->ip_len = htons(packet_length); #else ip->ip_len = packet_length; @@ -4305,10 +4266,14 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp, sctp_free_ifa(net->ro._s_addr); net->ro._s_addr = NULL; net->src_addr_selected = 0; +#if defined(__FreeBSD__) && !defined(__Userspace__) + RO_NHFREE(ro); +#else if (ro->ro_rt) { RTFREE(ro->ro_rt); ro->ro_rt = NULL; } +#endif } if (net->src_addr_selected == 0) { /* Cache the source address */ @@ -4358,8 +4323,8 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp, udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)); udp->uh_dport = port; udp->uh_ulen = htons((uint16_t)(packet_length - sizeof(struct ip))); -#if !defined(__Windows__) && !defined(__Userspace__) -#if defined(__FreeBSD__) && ((__FreeBSD_version > 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000) +#if !defined(__Userspace__) +#if defined(__FreeBSD__) if (V_udp_cksum) { udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP)); } else { @@ -4388,7 +4353,11 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp, * that somewhere and abort the association right away * (assuming this is an INIT being sent). 
*/ +#if defined(__FreeBSD__) && !defined(__Userspace__) + if (ro->ro_nh == NULL) { +#else if (ro->ro_rt == NULL) { +#endif /* * src addr selection failed to find a route (or * valid source addr), so we can't get there from @@ -4406,8 +4375,13 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp, (uint32_t) (ntohl(ip->ip_src.s_addr))); SCTPDBG(SCTP_DEBUG_OUTPUT3, "Destination is %x\n", (uint32_t)(ntohl(ip->ip_dst.s_addr))); +#if defined(__FreeBSD__) && !defined(__Userspace__) + SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n", + (void *)ro->ro_nh); +#else SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n", (void *)ro->ro_rt); +#endif if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) { /* failed to prepend data, give up */ @@ -4419,8 +4393,8 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp, if (port) { sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip) + sizeof(struct udphdr)); SCTP_STAT_INCR(sctps_sendswcrc); -#if !defined(__Windows__) && !defined(__Userspace__) -#if defined(__FreeBSD__) && ((__FreeBSD_version > 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000) +#if !defined(__Userspace__) +#if defined(__FreeBSD__) if (V_udp_cksum) { SCTP_ENABLE_UDP_CSUM(o_pak); } @@ -4429,7 +4403,7 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp, #endif #endif } else { -#if defined(__FreeBSD__) && __FreeBSD_version >= 800000 +#if defined(__FreeBSD__) && !defined(__Userspace__) m->m_pkthdr.csum_flags = CSUM_SCTP; m->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum); SCTP_STAT_INCR(sctps_sendhwcrc); @@ -4448,17 +4422,17 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp, sctp_packet_log(o_pak); #endif /* send it out. table id is taken from stcb */ -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) { so = SCTP_INP_SO(inp); SCTP_SOCKET_UNLOCK(so, 0); } #endif -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) SCTP_PROBE5(send, NULL, stcb, ip, stcb, sctphdr); #endif SCTP_IP_OUTPUT(ret, o_pak, ro, stcb, vrf_id); -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) { atomic_add_int(&stcb->asoc.refcnt, 1); SCTP_TCB_UNLOCK(stcb); @@ -4467,7 +4441,7 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp, atomic_subtract_int(&stcb->asoc.refcnt, 1); } #endif -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) if (port) { UDPSTAT_INC(udps_opackets); } @@ -4480,8 +4454,8 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp, SCTPDBG(SCTP_DEBUG_OUTPUT3, "IP output returns %d\n", ret); if (net == NULL) { /* free tempy routes */ -#if defined(__FreeBSD__) && __FreeBSD_version > 901000 - RO_RTFREE(ro); +#if defined(__FreeBSD__) && !defined(__Userspace__) + RO_NHFREE(ro); #else if (ro->ro_rt) { RTFREE(ro->ro_rt); @@ -4489,11 +4463,19 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp, } #endif } else { +#if defined(__FreeBSD__) && !defined(__Userspace__) + if ((ro->ro_nh != NULL) && (net->ro._s_addr) && +#else if ((ro->ro_rt != NULL) && (net->ro._s_addr) && +#endif ((net->dest_state & SCTP_ADDR_NO_PMTUD) == 0)) { uint32_t mtu; +#if defined(__FreeBSD__) && !defined(__Userspace__) + mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_nh); +#else mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt); +#endif if (mtu > 0) { if (net->port) { mtu -= 
sizeof(struct udphdr); @@ -4505,7 +4487,11 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp, net->mtu = mtu; } } +#if defined(__FreeBSD__) && !defined(__Userspace__) + } else if (ro->ro_nh == NULL) { +#else } else if (ro->ro_rt == NULL) { +#endif /* route was freed */ if (net->ro._s_addr && net->src_addr_selected) { @@ -4524,7 +4510,7 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp, uint32_t flowlabel, flowinfo; struct ip6_hdr *ip6h; struct route_in6 ip6route; -#if !(defined(__Panda__) || defined(__Userspace__)) +#if !defined(__Userspace__) struct ifnet *ifp; #endif struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp; @@ -4548,7 +4534,7 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp, * This means especially, that it is not set at the * SCTP layer. So use the value from the IP layer. */ -#if defined(__APPLE__) && (!defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION)) +#if defined(__APPLE__) && !defined(__Userspace__) && (!defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION)) flowlabel = ntohl(inp->ip_inp.inp.inp_flow); #else flowlabel = ntohl(((struct inpcb *)inp)->inp_flow); @@ -4569,7 +4555,7 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp, SCTP_BUF_LEN(newm) = len; SCTP_BUF_NEXT(newm) = m; m = newm; -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) if (net != NULL) { m->m_pkthdr.flowid = net->flowid; M_HASHTYPE_SET(m, net->flowtype); @@ -4588,7 +4574,7 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp, #ifdef SCTP_EMBEDDED_V6_SCOPE /* KAME hack: embed scopeid */ -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD) if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0) #else @@ -4625,12 +4611,10 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp, * This means especially, that it is not set at the * SCTP layer. So use the value from the IP layer. 
*/ -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Panda__) || defined(__Windows__) || defined(__Userspace__) -#if defined(__APPLE__) && (!defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION)) +#if defined(__APPLE__) && !defined(__Userspace__) && (!defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION)) tos_value = (ntohl(inp->ip_inp.inp.inp_flow) >> 20) & 0xff; #else tos_value = (ntohl(((struct inpcb *)inp)->inp_flow) >> 20) & 0xff; -#endif #endif } tos_value &= 0xfc; @@ -4667,16 +4651,20 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp, sctp_free_ifa(net->ro._s_addr); net->ro._s_addr = NULL; net->src_addr_selected = 0; +#if defined(__FreeBSD__) && !defined(__Userspace__) + RO_NHFREE(ro); +#else if (ro->ro_rt) { RTFREE(ro->ro_rt); ro->ro_rt = NULL; } +#endif } if (net->src_addr_selected == 0) { #ifdef SCTP_EMBEDDED_V6_SCOPE sin6 = (struct sockaddr_in6 *)&net->ro._l_addr; /* KAME hack: embed scopeid */ -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD) if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0) #else @@ -4722,7 +4710,7 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp, #ifdef SCTP_EMBEDDED_V6_SCOPE sin6 = (struct sockaddr_in6 *)&ro->ro_dst; /* KAME hack: embed scopeid */ -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD) if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0) #else @@ -4768,7 +4756,11 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp, } lsa6->sin6_port = inp->sctp_lport; +#if defined(__FreeBSD__) && !defined(__Userspace__) + if (ro->ro_nh == NULL) { +#else if (ro->ro_rt == NULL) { +#endif /* * src addr selection failed to find a route (or * valid source addr), so we can't get there from @@ -4836,7 +4828,7 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp, * that our ro pointer is now filled */ ip6h->ip6_hlim = SCTP_GET_HLIM(inp, ro); -#if !(defined(__Panda__) || defined(__Userspace__)) +#if !defined(__Userspace__) ifp = SCTP_GET_IFN_VOID_FROM_ROUTE(ro); #endif @@ -4868,27 +4860,20 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp, if (port) { sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr) + sizeof(struct udphdr)); SCTP_STAT_INCR(sctps_sendswcrc); -#if defined(__Windows__) +#if !defined(__Userspace__) +#if defined(_WIN32) udp->uh_sum = 0; -#elif !defined(__Userspace__) +#else if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), packet_length - sizeof(struct ip6_hdr))) == 0) { udp->uh_sum = 0xffff; } #endif - } else { -#if defined(__FreeBSD__) && __FreeBSD_version >= 800000 -#if __FreeBSD_version < 900000 - sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr)); - SCTP_STAT_INCR(sctps_sendswcrc); -#else -#if __FreeBSD_version > 901000 - m->m_pkthdr.csum_flags = CSUM_SCTP_IPV6; -#else - m->m_pkthdr.csum_flags = CSUM_SCTP; #endif + } else { +#if defined(__FreeBSD__) && !defined(__Userspace__) + m->m_pkthdr.csum_flags = CSUM_SCTP_IPV6; m->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum); SCTP_STAT_INCR(sctps_sendhwcrc); -#endif #else if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) && (stcb) && (stcb->asoc.scope.loopback_scope))) { @@ -4900,7 +4885,7 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp, #endif } /* send it out. 
table id is taken from stcb */ -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) { so = SCTP_INP_SO(inp); SCTP_SOCKET_UNLOCK(so, 0); @@ -4910,7 +4895,7 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp, if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) sctp_packet_log(o_pak); #endif -#if !(defined(__Panda__) || defined(__Userspace__)) +#if !defined(__Userspace__) #if defined(__FreeBSD__) SCTP_PROBE5(send, NULL, stcb, ip6h, stcb, sctphdr); #endif @@ -4918,7 +4903,7 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp, #else SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, NULL, stcb, vrf_id); #endif -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) { atomic_add_int(&stcb->asoc.refcnt, 1); SCTP_TCB_UNLOCK(stcb); @@ -4933,7 +4918,7 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp, sin6->sin6_port = prev_port; } SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret); -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) if (port) { UDPSTAT_INC(udps_opackets); } @@ -4945,8 +4930,8 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp, } if (net == NULL) { /* Now if we had a temp route free it */ -#if defined(__FreeBSD__) && __FreeBSD_version > 901000 - RO_RTFREE(ro); +#if defined(__FreeBSD__) && !defined(__Userspace__) + RO_NHFREE(ro); #else if (ro->ro_rt) { RTFREE(ro->ro_rt); @@ -4955,7 +4940,11 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp, #endif } else { /* PMTU check versus smallest asoc MTU goes here */ +#if defined(__FreeBSD__) && !defined(__Userspace__) + if (ro->ro_nh == NULL) { +#else if (ro->ro_rt == NULL) { +#endif /* Route was freed */ if (net->ro._s_addr && net->src_addr_selected) { @@ -4964,11 +4953,19 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp, } net->src_addr_selected = 0; } +#if defined(__FreeBSD__) && !defined(__Userspace__) + if ((ro->ro_nh != NULL) && (net->ro._s_addr) && +#else if ((ro->ro_rt != NULL) && (net->ro._s_addr) && +#endif ((net->dest_state & SCTP_ADDR_NO_PMTUD) == 0)) { uint32_t mtu; +#if defined(__FreeBSD__) && !defined(__Userspace__) + mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_nh); +#else mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt); +#endif if (mtu > 0) { if (net->port) { mtu -= sizeof(struct udphdr); @@ -4981,9 +4978,9 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp, } } } -#if !defined(__Panda__) && !defined(__Userspace__) +#if !defined(__Userspace__) else if (ifp) { -#if defined(__Windows__) +#if defined(_WIN32) #define ND_IFINFO(ifp) (ifp) #define linkmtu if_mtu #endif @@ -5058,13 +5055,8 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp, } } - void -sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked -#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) - SCTP_UNUSED -#endif - ) +sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked) { struct mbuf *m, *m_last; struct sctp_nets *net; @@ -5077,7 +5069,7 @@ sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked int error; uint16_t num_ext, chunk_len, padding_len, parameter_len; -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) if (so_locked) { sctp_lock_assert(SCTP_INP_SO(inp)); } else { @@ -5271,7 +5263,7 @@ 
sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked } /* now any cookie time extensions */ - if (stcb->asoc.cookie_preserve_req) { + if (stcb->asoc.cookie_preserve_req > 0) { struct sctp_cookie_perserve_param *cookie_preserve; if (padding_len > 0) { @@ -5342,7 +5334,7 @@ sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked m, 0, NULL, 0, 0, 0, 0, inp->sctp_lport, stcb->rport, htonl(0), net->port, NULL, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) 0, 0, #endif so_locked))) { @@ -5557,7 +5549,6 @@ sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt, } } return (op_err); - break; } default: /* @@ -5649,7 +5640,6 @@ sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt, at += SCTP_SIZE32(plen); } break; - } phdr = sctp_get_next_param(mat, at, &params, sizeof(params)); } @@ -5692,24 +5682,23 @@ sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt, return (op_err); } -static int +/* + * Given a INIT chunk, look through the parameters to verify that there + * are no new addresses. + * Return true, if there is a new address or there is a problem parsing + the parameters. Provide an optional error cause used when sending an ABORT. + * Return false, if there are no new addresses and there is no problem in + parameter processing. + */ +static bool sctp_are_there_new_addresses(struct sctp_association *asoc, - struct mbuf *in_initpkt, int offset, struct sockaddr *src) + struct mbuf *in_initpkt, int offset, int limit, struct sockaddr *src, + struct mbuf **op_err) { - /* - * Given a INIT packet, look through the packet to verify that there - * are NO new addresses. As we go through the parameters add reports - * of any un-understood parameters that require an error. Also we - * must return (1) to drop the packet if we see a un-understood - * parameter that tells us to drop the chunk. - */ struct sockaddr *sa_touse; struct sockaddr *sa; struct sctp_paramhdr *phdr, params; - uint16_t ptype, plen; - uint8_t fnd; struct sctp_nets *net; - int check_src; #ifdef INET struct sockaddr_in sin4, *sa4; #endif @@ -5719,7 +5708,10 @@ sctp_are_there_new_addresses(struct sctp_association *asoc, #if defined(__Userspace__) struct sockaddr_conn *sac; #endif + uint16_t ptype, plen; + bool fnd, check_src; + *op_err = NULL; #ifdef INET memset(&sin4, 0, sizeof(sin4)); sin4.sin_family = AF_INET; @@ -5735,26 +5727,26 @@ sctp_are_there_new_addresses(struct sctp_association *asoc, #endif #endif /* First what about the src address of the pkt ? 
*/ - check_src = 0; + check_src = false; switch (src->sa_family) { #ifdef INET case AF_INET: if (asoc->scope.ipv4_addr_legal) { - check_src = 1; + check_src = true; } break; #endif #ifdef INET6 case AF_INET6: if (asoc->scope.ipv6_addr_legal) { - check_src = 1; + check_src = true; } break; #endif #if defined(__Userspace__) case AF_CONN: if (asoc->scope.conn_addr_legal) { - check_src = 1; + check_src = true; } break; #endif @@ -5763,7 +5755,7 @@ sctp_are_there_new_addresses(struct sctp_association *asoc, break; } if (check_src) { - fnd = 0; + fnd = false; TAILQ_FOREACH(net, &asoc->nets, sctp_next) { sa = (struct sockaddr *)&net->ro._l_addr; if (sa->sa_family == src->sa_family) { @@ -5774,7 +5766,7 @@ sctp_are_there_new_addresses(struct sctp_association *asoc, sa4 = (struct sockaddr_in *)sa; src4 = (struct sockaddr_in *)src; if (sa4->sin_addr.s_addr == src4->sin_addr.s_addr) { - fnd = 1; + fnd = true; break; } } @@ -5786,7 +5778,7 @@ sctp_are_there_new_addresses(struct sctp_association *asoc, sa6 = (struct sockaddr_in6 *)sa; src6 = (struct sockaddr_in6 *)src; if (SCTP6_ARE_ADDR_EQUAL(sa6, src6)) { - fnd = 1; + fnd = true; break; } } @@ -5798,16 +5790,22 @@ sctp_are_there_new_addresses(struct sctp_association *asoc, sac = (struct sockaddr_conn *)sa; srcc = (struct sockaddr_conn *)src; if (sac->sconn_addr == srcc->sconn_addr) { - fnd = 1; + fnd = true; break; } } #endif } } - if (fnd == 0) { - /* New address added! no need to look further. */ - return (1); + if (!fnd) { + /* + * If sending an ABORT in case of an additional address, + * don't use the new address error cause. + * This looks no different than if no listener was + * present. + */ + *op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), "Address added"); + return (true); } } /* Ok so far lets munge through the rest of the packet */ @@ -5817,6 +5815,14 @@ sctp_are_there_new_addresses(struct sctp_association *asoc, sa_touse = NULL; ptype = ntohs(phdr->param_type); plen = ntohs(phdr->param_length); + if (offset + plen > limit) { + *op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, "Partial parameter"); + return (true); + } + if (plen < sizeof(struct sctp_paramhdr)) { + *op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, "Parameter length too small"); + return (true); + } switch (ptype) { #ifdef INET case SCTP_IPV4_ADDRESS: @@ -5824,12 +5830,14 @@ sctp_are_there_new_addresses(struct sctp_association *asoc, struct sctp_ipv4addr_param *p4, p4_buf; if (plen != sizeof(struct sctp_ipv4addr_param)) { - return (1); + *op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, "Parameter length illegal"); + return (true); } phdr = sctp_get_next_param(in_initpkt, offset, (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf)); if (phdr == NULL) { - return (1); + *op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, ""); + return (true); } if (asoc->scope.ipv4_addr_legal) { p4 = (struct sctp_ipv4addr_param *)phdr; @@ -5845,12 +5853,14 @@ sctp_are_there_new_addresses(struct sctp_association *asoc, struct sctp_ipv6addr_param *p6, p6_buf; if (plen != sizeof(struct sctp_ipv6addr_param)) { - return (1); + *op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, "Parameter length illegal"); + return (true); } phdr = sctp_get_next_param(in_initpkt, offset, (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf)); if (phdr == NULL) { - return (1); + *op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, ""); + return (true); } if (asoc->scope.ipv6_addr_legal) { p6 = (struct sctp_ipv6addr_param *)phdr; @@ -5867,7 
+5877,7 @@ sctp_are_there_new_addresses(struct sctp_association *asoc, } if (sa_touse) { /* ok, sa_touse points to one to check */ - fnd = 0; + fnd = false; TAILQ_FOREACH(net, &asoc->nets, sctp_next) { sa = (struct sockaddr *)&net->ro._l_addr; if (sa->sa_family != sa_touse->sa_family) { @@ -5878,7 +5888,7 @@ sctp_are_there_new_addresses(struct sctp_association *asoc, sa4 = (struct sockaddr_in *)sa; if (sa4->sin_addr.s_addr == sin4.sin_addr.s_addr) { - fnd = 1; + fnd = true; break; } } @@ -5888,21 +5898,31 @@ sctp_are_there_new_addresses(struct sctp_association *asoc, sa6 = (struct sockaddr_in6 *)sa; if (SCTP6_ARE_ADDR_EQUAL( sa6, &sin6)) { - fnd = 1; + fnd = true; break; } } #endif } if (!fnd) { - /* New addr added! no need to look further */ - return (1); + /* + * If sending an ABORT in case of an additional + * address, don't use the new address error + * cause. + * This looks no different than if no listener + * was present. + */ + *op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), "Address added"); + return (true); } } offset += SCTP_SIZE32(plen); + if (offset >= limit) { + break; + } phdr = sctp_get_next_param(in_initpkt, offset, &params, sizeof(params)); } - return (0); + return (false); } /* @@ -5917,8 +5937,8 @@ sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int iphlen, int offset, struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh, struct sctp_init_chunk *init_chk, -#if defined(__FreeBSD__) - uint8_t mflowtype, uint32_t mflowid, +#if defined(__FreeBSD__) && !defined(__Userspace__) + uint8_t mflowtype, uint32_t mflowid, #endif uint32_t vrf_id, uint16_t port) { @@ -5965,18 +5985,12 @@ sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb, } if ((asoc != NULL) && (SCTP_GET_STATE(stcb) != SCTP_STATE_COOKIE_WAIT)) { - if (sctp_are_there_new_addresses(asoc, init_pkt, offset, src)) { + if (sctp_are_there_new_addresses(asoc, init_pkt, offset, offset + ntohs(init_chk->ch.chunk_length), src, &op_err)) { /* * new addresses, out of here in non-cookie-wait states - * - * Send an ABORT, without the new address error cause. - * This looks no different than if no listener - * was present. 
*/ - op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), - "Address added"); sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, op_err, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) mflowtype, mflowid, inp->fibnum, #endif vrf_id, port); @@ -5994,7 +6008,7 @@ sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb, op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), "Remote encapsulation port changed"); sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, op_err, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) mflowtype, mflowid, inp->fibnum, #endif vrf_id, port); @@ -6012,13 +6026,13 @@ sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb, if (op_err == NULL) { char msg[SCTP_DIAG_INFO_LEN]; - snprintf(msg, sizeof(msg), "%s:%d at %s", __FILE__, __LINE__, __func__); + SCTP_SNPRINTF(msg, sizeof(msg), "%s:%d at %s", __FILE__, __LINE__, __func__); op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), msg); } sctp_send_abort(init_pkt, iphlen, src, dst, sh, init_chk->init.initiate_tag, op_err, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) mflowtype, mflowid, inp->fibnum, #endif vrf_id, port); @@ -6133,7 +6147,7 @@ sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb, { stc.addr_type = SCTP_IPV6_ADDRESS; memcpy(&stc.address, &src6->sin6_addr, sizeof(struct in6_addr)); -#if defined(__FreeBSD__) && (((__FreeBSD_version < 900000) && (__FreeBSD_version >= 804000)) || (__FreeBSD_version > 900000)) +#if defined(__FreeBSD__) && !defined(__Userspace__) stc.scope_id = ntohs(in6_getscope(&src6->sin6_addr)); #else stc.scope_id = 0; @@ -6154,7 +6168,7 @@ sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb, * because we share one link that all links are * common. */ -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) /* Mac OS X currently doesn't have in6_getscope() */ stc.scope_id = src6->sin6_addr.s6_addr16[1]; #endif @@ -6261,7 +6275,6 @@ sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb, } net->src_addr_selected = 1; - } stc.laddress[0] = net->ro._s_addr->address.sin.sin_addr.s_addr; stc.laddress[1] = 0; @@ -6348,7 +6361,9 @@ sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb, atomic_add_int(&asoc->refcnt, 1); SCTP_TCB_UNLOCK(stcb); new_tag: + SCTP_INP_INFO_RLOCK(); vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1); + SCTP_INP_INFO_RUNLOCK(); if ((asoc->peer_supports_nat) && (vtag == asoc->my_vtag)) { /* Got a duplicate vtag on some guy behind a nat * make sure we don't use it. @@ -6364,7 +6379,9 @@ sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb, } else { SCTP_INP_INCR_REF(inp); SCTP_INP_RUNLOCK(inp); + SCTP_INP_INFO_RLOCK(); vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1); + SCTP_INP_INFO_RUNLOCK(); initack->init.initiate_tag = htonl(vtag); /* get a TSN to use too */ initack->init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep)); @@ -6626,6 +6643,27 @@ sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb, (uint8_t *)inp->sctp_ep.secret_key[(int)(inp->sctp_ep.current_secret_number)], SCTP_SECRET_SIZE, m_cookie, sizeof(struct sctp_paramhdr), (uint8_t *)signature, SCTP_SIGNATURE_SIZE); +#if defined(__Userspace__) + /* + * Don't put AF_CONN addresses on the wire, in case this is critical + * for the application. 
However, they are protected by the HMAC and + * need to be reconstructed before checking the HMAC. + * Clearing is only done in the mbuf chain, since the local stc is + * not used anymore. + */ + if (stc.addr_type == SCTP_CONN_ADDRESS) { + const void *p = NULL; + + m_copyback(m_cookie, sizeof(struct sctp_paramhdr) + offsetof(struct sctp_state_cookie, address), + (int)sizeof(void *), (caddr_t)&p); + } + if (stc.laddr_type == SCTP_CONN_ADDRESS) { + const void *p = NULL; + + m_copyback(m_cookie, sizeof(struct sctp_paramhdr) + offsetof(struct sctp_state_cookie, laddress), + (int)sizeof(void *), (caddr_t)&p); + } +#endif /* * We sifa 0 here to NOT set IP_DF if its IPv4, we ignore the return * here since the timer will drive a retranmission. @@ -6646,7 +6684,7 @@ sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 0, 0, inp->sctp_lport, sh->src_port, init_chk->init.initiate_tag, port, over_addr, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) mflowtype, mflowid, #endif SCTP_SO_NOT_LOCKED))) { @@ -6665,7 +6703,6 @@ sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb, SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); } - static void sctp_prune_prsctp(struct sctp_tcb *stcb, struct sctp_association *asoc, @@ -6690,13 +6727,13 @@ sctp_prune_prsctp(struct sctp_tcb *stcb, * This one is PR-SCTP AND buffer space * limited type */ - if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) { + if (chk->rec.data.timetodrop.tv_sec > (long)srcv->sinfo_timetolive) { /* * Lower numbers equates to higher - * priority so if the one we are - * looking at has a larger or equal - * priority we want to drop the data - * and NOT retransmit it. + * priority. So if the one we are + * looking at has a larger priority, + * we want to drop the data and NOT + * retransmit it. */ if (chk->data) { /* @@ -6725,7 +6762,7 @@ sctp_prune_prsctp(struct sctp_tcb *stcb, TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) { /* Here we must move to the sent queue and mark */ if (PR_SCTP_BUF_ENABLED(chk->flags)) { - if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) { + if (chk->rec.data.timetodrop.tv_sec > (long)srcv->sinfo_timetolive) { if (chk->data) { /* * We release the book_size @@ -6828,7 +6865,7 @@ sctp_set_prsctp_policy(struct sctp_stream_queue_pending *sp) /* TODO sctp_constants.h needs alternative time macros when * _KERNEL is undefined. 
*/ -#ifndef __FreeBSD__ +#if !(defined(__FreeBSD__) && !defined(__Userspace__)) timeradd(&sp->ts, &tv, &sp->ts); #else timevaladd(&sp->ts, &tv); @@ -6951,7 +6988,6 @@ out_now: return (error); } - static struct mbuf * sctp_copy_mbufchain(struct mbuf *clonechain, struct mbuf *outchain, @@ -6976,12 +7012,7 @@ sctp_copy_mbufchain(struct mbuf *clonechain, appendchain = clonechain; } else { if (!copy_by_ref && -#if defined(__Panda__) - 0 -#else - (sizeofcpy <= (int)((((SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count) - 1) * MLEN) + MHLEN))) -#endif - ) { + (sizeofcpy <= (int)((((SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count) - 1) * MLEN) + MHLEN)))) { /* Its not in a cluster */ if (*endofchain == NULL) { /* lets get a mbuf cluster */ @@ -7112,11 +7143,7 @@ sctp_med_chunk_output(struct sctp_inpcb *inp, int *num_out, int *reason_code, int control_only, int from_where, - struct timeval *now, int *now_filled, int frag_point, int so_locked -#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) - SCTP_UNUSED -#endif - ); + struct timeval *now, int *now_filled, int frag_point, int so_locked); static void sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr, @@ -7179,7 +7206,7 @@ sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr, * dis-appearing on us. */ atomic_add_int(&stcb->asoc.refcnt, 1); - sctp_abort_an_association(inp, stcb, m, SCTP_SO_NOT_LOCKED); + sctp_abort_an_association(inp, stcb, m, false, SCTP_SO_NOT_LOCKED); /* sctp_abort_an_association calls sctp_free_asoc() * free association will NOT free it since we * incremented the refcnt .. we do this to prevent @@ -7222,7 +7249,7 @@ sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr, sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net); sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, - asoc->primary_destination); + NULL); added_control = 1; do_chunk_output = 0; } @@ -7251,21 +7278,20 @@ sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr, char msg[SCTP_DIAG_INFO_LEN]; abort_anyway: - snprintf(msg, sizeof(msg), - "%s:%d at %s", __FILE__, __LINE__, __func__); + SCTP_SNPRINTF(msg, sizeof(msg), + "%s:%d at %s", __FILE__, __LINE__, __func__); op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), msg); atomic_add_int(&stcb->asoc.refcnt, 1); sctp_abort_an_association(stcb->sctp_ep, stcb, - op_err, SCTP_SO_NOT_LOCKED); + op_err, false, SCTP_SO_NOT_LOCKED); atomic_add_int(&stcb->asoc.refcnt, -1); goto no_chunk_output; } sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, - asoc->primary_destination); + NULL); } } - } } un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) + @@ -7313,7 +7339,9 @@ sctp_sendall_completes(void *ptr, uint32_t val SCTP_UNUSED) /* now free everything */ if (ca->inp) { /* Lets clear the flag to allow others to run. */ + SCTP_INP_WLOCK(ca->inp); ca->inp->sctp_flags &= ~SCTP_PCB_FLAGS_SND_ITERATOR_UP; + SCTP_INP_WUNLOCK(ca->inp); } sctp_m_freem(ca->m); SCTP_FREE(ca, SCTP_M_COPYAL); @@ -7368,11 +7396,7 @@ sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m, int ret; struct sctp_copy_all *ca; - if (inp->sctp_flags & SCTP_PCB_FLAGS_SND_ITERATOR_UP) { - /* There is another. 
*/ - return (EBUSY); - } -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) #if defined(APPLE_LEOPARD) if (uio->uio_resid > SCTP_BASE_SYSCTL(sctp_sendall_limit)) { #else @@ -7397,6 +7421,18 @@ sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m, if (srcv) { memcpy(&ca->sndrcv, srcv, sizeof(struct sctp_nonpad_sndrcvinfo)); } + + /* Serialize. */ + SCTP_INP_WLOCK(inp); + if ((inp->sctp_flags & SCTP_PCB_FLAGS_SND_ITERATOR_UP) != 0) { + SCTP_INP_WUNLOCK(inp); + sctp_m_freem(m); + SCTP_FREE(ca, SCTP_M_COPYAL); + return (EBUSY); + } + inp->sctp_flags |= SCTP_PCB_FLAGS_SND_ITERATOR_UP; + SCTP_INP_WUNLOCK(inp); + /* * take off the sendall flag, it would be bad if we failed to do * this :-0 @@ -7404,7 +7440,7 @@ sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m, ca->sndrcv.sinfo_flags &= ~SCTP_SENDALL; /* get length and mbuf chain */ if (uio) { -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) #if defined(APPLE_LEOPARD) ca->sndlen = uio->uio_resid; #else @@ -7413,15 +7449,19 @@ sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m, #else ca->sndlen = uio->uio_resid; #endif -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 0); #endif ca->m = sctp_copy_out_all(uio, ca->sndlen); -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_SOCKET_LOCK(SCTP_INP_SO(inp), 0); #endif if (ca->m == NULL) { SCTP_FREE(ca, SCTP_M_COPYAL); + sctp_m_freem(m); + SCTP_INP_WLOCK(inp); + inp->sctp_flags &= ~SCTP_PCB_FLAGS_SND_ITERATOR_UP; + SCTP_INP_WUNLOCK(inp); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); return (ENOMEM); } @@ -7434,14 +7474,15 @@ sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m, ca->sndlen += SCTP_BUF_LEN(mat); } } - inp->sctp_flags |= SCTP_PCB_FLAGS_SND_ITERATOR_UP; ret = sctp_initiate_iterator(NULL, sctp_sendall_iterator, NULL, SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES, SCTP_ASOC_ANY_STATE, (void *)ca, 0, sctp_sendall_completes, inp, 1); if (ret) { + SCTP_INP_WLOCK(inp); inp->sctp_flags &= ~SCTP_PCB_FLAGS_SND_ITERATOR_UP; + SCTP_INP_WUNLOCK(inp); SCTP_FREE(ca, SCTP_M_COPYAL); SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT); return (EFAULT); @@ -7449,7 +7490,6 @@ sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m, return (0); } - void sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc) { @@ -7497,7 +7537,6 @@ sctp_toss_old_asconf(struct sctp_tcb *stcb) } } - static void sctp_clean_up_datalist(struct sctp_tcb *stcb, struct sctp_association *asoc, @@ -7591,11 +7630,7 @@ sctp_clean_up_datalist(struct sctp_tcb *stcb, } static void -sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc, int so_locked -#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) - SCTP_UNUSED -#endif -) +sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc, int so_locked) { struct sctp_tmit_chunk *chk, *nchk; @@ -7698,11 +7733,7 @@ sctp_move_to_outqueue(struct sctp_tcb *stcb, int *giveup, int eeor_mode, int *bail, - int so_locked -#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) - SCTP_UNUSED -#endif - ) + int so_locked) { /* Move from the stream to the send_queue keeping track of the total */ struct sctp_association *asoc; @@ -7961,7 +7992,6 @@ re_look: sp->tail_mbuf = sp->data = NULL; sp->length = 0; #endif - } sctp_m_free(m); m = sp->data; @@ -8101,7 +8131,7 @@ re_look: 
sctp_auth_key_acquire(stcb, chk->auth_keyid); chk->holds_key_ref = 1; } -#if defined(__FreeBSD__) || defined(__Panda__) +#if defined(__FreeBSD__) && !defined(__Userspace__) chk->rec.data.tsn = atomic_fetchadd_int(&asoc->sending_seq, 1); #else chk->rec.data.tsn = asoc->sending_seq++; @@ -8221,14 +8251,9 @@ out_of: return (to_move); } - static void sctp_fill_outqueue(struct sctp_tcb *stcb, - struct sctp_nets *net, int frag_point, int eeor_mode, int *quit_now, int so_locked -#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) - SCTP_UNUSED -#endif -) + struct sctp_nets *net, int frag_point, int eeor_mode, int *quit_now, int so_locked) { struct sctp_association *asoc; struct sctp_stream_out *strq; @@ -8276,7 +8301,11 @@ sctp_fill_outqueue(struct sctp_tcb *stcb, } strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc); total_moved += moved; - space_left -= moved; + if (space_left >= moved) { + space_left -= moved; + } else { + space_left = 0; + } if (space_left >= SCTP_DATA_CHUNK_OVERHEAD(stcb)) { space_left -= SCTP_DATA_CHUNK_OVERHEAD(stcb); } else { @@ -8348,11 +8377,7 @@ sctp_med_chunk_output(struct sctp_inpcb *inp, int *num_out, int *reason_code, int control_only, int from_where, - struct timeval *now, int *now_filled, int frag_point, int so_locked -#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) - SCTP_UNUSED -#endif - ) + struct timeval *now, int *now_filled, int frag_point, int so_locked) { /** * Ok this is the generic chunk service queue. we must do the @@ -8389,7 +8414,7 @@ sctp_med_chunk_output(struct sctp_inpcb *inp, the destination. */ int quit_now = 0; -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) if (so_locked) { sctp_lock_assert(SCTP_INP_SO(inp)); } else { @@ -8472,7 +8497,6 @@ sctp_med_chunk_output(struct sctp_inpcb *inp, break; } } - } if ((no_data_chunks == 0) && (skip_fill_up == 0) && @@ -8560,7 +8584,7 @@ sctp_med_chunk_output(struct sctp_inpcb *inp, } old_start_at = NULL; again_one_more_time: - for (net = start_at ; net != NULL; net = TAILQ_NEXT(net, sctp_next)) { + for (net = start_at; net != NULL; net = TAILQ_NEXT(net, sctp_next)) { /* how much can we send? */ /* SCTPDBG("Examine for sending net:%x\n", (uint32_t)net); */ if (old_start_at && (old_start_at == net)) { @@ -8762,7 +8786,7 @@ again_one_more_time: inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag), net->port, NULL, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) 0, 0, #endif so_locked))) { @@ -8961,7 +8985,7 @@ again_one_more_time: /* turn off the timer */ if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { sctp_timer_stop(SCTP_TIMER_TYPE_RECV, - inp, stcb, net, + inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_1); } } @@ -9033,7 +9057,7 @@ again_one_more_time: inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag), net->port, NULL, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) 0, 0, #endif so_locked))) { @@ -9375,7 +9399,7 @@ again_one_more_time: inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag), net->port, NULL, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) 0, 0, #endif so_locked))) { @@ -9402,7 +9426,7 @@ again_one_more_time: * the top of the for, but just to make sure * I will reset these again here. */ - ctl_cnt = bundle_at = 0; + ctl_cnt = 0; continue; /* This takes us back to the for() for the nets. 
*/ } else { asoc->ifp_had_enobuf = 0; @@ -9716,7 +9740,6 @@ sctp_send_cookie_ack(struct sctp_tcb *stcb) return; } - void sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net) { @@ -9967,16 +9990,11 @@ sctp_send_asconf_ack(struct sctp_tcb *stcb) return; } - static int sctp_chunk_retransmission(struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_association *asoc, - int *cnt_out, struct timeval *now, int *now_filled, int *fr_done, int so_locked -#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) - SCTP_UNUSED -#endif - ) + int *cnt_out, struct timeval *now, int *now_filled, int *fr_done, int so_locked) { /*- * send out one MTU of retransmission. If fast_retransmit is @@ -10005,7 +10023,7 @@ sctp_chunk_retransmission(struct sctp_inpcb *inp, int data_auth_reqd = 0; uint32_t dmtu = 0; -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) if (so_locked) { sctp_lock_assert(SCTP_INP_SO(inp)); } else { @@ -10013,7 +10031,7 @@ sctp_chunk_retransmission(struct sctp_inpcb *inp, } #endif SCTP_TCB_LOCK_ASSERT(stcb); - tmr_started = ctl_cnt = bundle_at = error = 0; + tmr_started = ctl_cnt = 0; no_fragmentflg = 1; fwd_tsn = 0; *cnt_out = 0; @@ -10086,7 +10104,7 @@ sctp_chunk_retransmission(struct sctp_inpcb *inp, no_fragmentflg, 0, 0, inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag), chk->whoTo->port, NULL, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) 0, 0, #endif so_locked))) { @@ -10149,13 +10167,13 @@ sctp_chunk_retransmission(struct sctp_inpcb *inp, struct mbuf *op_err; char msg[SCTP_DIAG_INFO_LEN]; - snprintf(msg, sizeof(msg), "TSN %8.8x retransmitted %d times, giving up", - chk->rec.data.tsn, chk->snd_count); + SCTP_SNPRINTF(msg, sizeof(msg), "TSN %8.8x retransmitted %d times, giving up", + chk->rec.data.tsn, chk->snd_count); op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), msg); atomic_add_int(&stcb->asoc.refcnt, 1); sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, - so_locked); + false, so_locked); SCTP_TCB_LOCK(stcb); atomic_subtract_int(&stcb->asoc.refcnt, 1); return (SCTP_RETRAN_EXIT); @@ -10367,7 +10385,7 @@ sctp_chunk_retransmission(struct sctp_inpcb *inp, no_fragmentflg, 0, 0, inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag), net->port, NULL, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) 0, 0, #endif so_locked))) { @@ -10436,10 +10454,8 @@ sctp_chunk_retransmission(struct sctp_inpcb *inp, * also double the output queue size, since this * get shrunk when we free by this amount. */ - atomic_add_int(&((asoc)->total_output_queue_size),data_list[i]->book_size); + atomic_add_int(&((asoc)->total_output_queue_size), data_list[i]->book_size); data_list[i]->book_size *= 2; - - } else { if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { sctp_log_rwnd(SCTP_DECREASE_PEER_RWND, @@ -10477,7 +10493,7 @@ sctp_chunk_retransmission(struct sctp_inpcb *inp, * t3-expiring. */ sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net, - SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_2); + SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_2); sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net); } } @@ -10536,11 +10552,7 @@ void sctp_chunk_output(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_where, - int so_locked -#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) - SCTP_UNUSED -#endif - ) + int so_locked) { /*- * Ok this is the generic chunk service queue. 
we must do the @@ -10567,7 +10579,7 @@ sctp_chunk_output(struct sctp_inpcb *inp, int fr_done; unsigned int tot_frs = 0; -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) if (so_locked) { sctp_lock_assert(SCTP_INP_SO(inp)); } else { @@ -10603,7 +10615,8 @@ do_it_again: */ if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { sctp_send_sack(stcb, so_locked); - (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer); + sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, + SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_3); } while (asoc->sent_queue_retran_cnt) { /*- @@ -10722,7 +10735,6 @@ do_it_again: } } } - } burst_cnt = 0; do { @@ -10812,27 +10824,18 @@ do_it_again: return; } - int sctp_output( struct sctp_inpcb *inp, -#if defined(__Panda__) - pakhandle_type m, -#else struct mbuf *m, -#endif struct sockaddr *addr, -#if defined(__Panda__) - pakhandle_type control, -#else struct mbuf *control, -#endif -#if defined(__FreeBSD__) && __FreeBSD_version >= 500000 +#if defined(__FreeBSD__) && !defined(__Userspace__) struct thread *p, -#elif defined(__Windows__) +#elif defined(_WIN32) && !defined(__Userspace__) PKTHREAD p, #else -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) struct proc *p SCTP_UNUSED, #else struct proc *p, @@ -10854,7 +10857,7 @@ sctp_output( (struct uio *)NULL, m, control, -#if defined(__APPLE__) || defined(__Panda__) +#if defined(__APPLE__) && !defined(__Userspace__) flags #else flags, p @@ -11079,11 +11082,7 @@ sctp_fill_in_rest: } void -sctp_send_sack(struct sctp_tcb *stcb, int so_locked -#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) - SCTP_UNUSED -#endif -) +sctp_send_sack(struct sctp_tcb *stcb, int so_locked) { /*- * Queue up a SACK or NR-SACK in the control queue. @@ -11149,7 +11148,7 @@ sctp_send_sack(struct sctp_tcb *stcb, int so_locked if (stcb->asoc.delayed_ack) { sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, - SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_3); + SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_4); sctp_timer_start(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL); } else { @@ -11218,7 +11217,7 @@ sctp_send_sack(struct sctp_tcb *stcb, int so_locked if (stcb->asoc.delayed_ack) { sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, - SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_4); + SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5); sctp_timer_start(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL); } else { @@ -11264,7 +11263,7 @@ sctp_send_sack(struct sctp_tcb *stcb, int so_locked if (highest_tsn > asoc->mapping_array_base_tsn) { siz = (((highest_tsn - asoc->mapping_array_base_tsn) + 1) + 7) / 8; } else { - siz = (((MAX_TSN - highest_tsn) + 1) + highest_tsn + 7) / 8; + siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + highest_tsn + 7) / 8; } } else { sack = NULL; @@ -11348,7 +11347,6 @@ sctp_send_sack(struct sctp_tcb *stcb, int so_locked } if ((type == SCTP_NR_SELECTIVE_ACK) && (limit_reached == 0)) { - mergeable = 0; if (asoc->highest_tsn_inside_nr_map > asoc->mapping_array_base_tsn) { @@ -11475,11 +11473,7 @@ sctp_send_sack(struct sctp_tcb *stcb, int so_locked } void -sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked -#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) - SCTP_UNUSED -#endif - ) +sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked) { struct mbuf *m_abort, *m, *m_last; struct mbuf *m_out, *m_end = NULL; @@ -11491,7 +11485,7 @@ sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked int error; uint16_t cause_len, 
chunk_len, padding_len; -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) if (so_locked) { sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep)); } else { @@ -11572,7 +11566,7 @@ sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked m_out, auth_offset, auth, stcb->asoc.authinfo.active_keyid, 1, 0, 0, stcb->sctp_ep->sctp_lport, stcb->rport, htonl(vtag), stcb->asoc.primary_destination->port, NULL, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) 0, 0, #endif so_locked))) { @@ -11622,7 +11616,7 @@ sctp_send_shutdown_complete(struct sctp_tcb *stcb, stcb->sctp_ep->sctp_lport, stcb->rport, htonl(vtag), net->port, NULL, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) 0, 0, #endif SCTP_SO_NOT_LOCKED))) { @@ -11638,7 +11632,7 @@ sctp_send_shutdown_complete(struct sctp_tcb *stcb, return; } -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) static void sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh, uint32_t vtag, @@ -11653,11 +11647,7 @@ sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst, uint32_t vrf_id SCTP_UNUSED, uint16_t port) #endif { -#ifdef __Panda__ - pakhandle_type o_pak; -#else struct mbuf *o_pak; -#endif struct mbuf *mout; struct sctphdr *shout; struct sctp_chunkhdr *ch; @@ -11666,7 +11656,7 @@ sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst, #endif int ret, len, cause_len, padding_len; #ifdef INET -#if defined(__APPLE__) || defined(__Panda__) +#if defined(__APPLE__) && !defined(__Userspace__) sctp_route_t ro; #endif struct sockaddr_in *src_sin, *dst_sin; @@ -11721,7 +11711,7 @@ sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst, len += sizeof(struct udphdr); } #endif -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD) mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_NOWAIT, 1, MT_DATA); #else @@ -11736,7 +11726,7 @@ sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst, } return; } -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD) SCTP_BUF_RESV_UF(mout, max_linkhdr); #else @@ -11747,7 +11737,7 @@ sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst, #endif SCTP_BUF_LEN(mout) = len; SCTP_BUF_NEXT(mout) = cause; -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) M_SETFIB(mout, fibnum); mout->m_pkthdr.flowid = mflowid; M_HASHTYPE_SET(mout, mflowtype); @@ -11767,18 +11757,16 @@ sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst, ip->ip_v = IPVERSION; ip->ip_hl = (sizeof(struct ip) >> 2); ip->ip_tos = 0; -#if defined(__FreeBSD__) -#if __FreeBSD_version >= 1000000 +#if defined(__FreeBSD__) && !defined(__Userspace__) ip->ip_off = htons(IP_DF); -#else - ip->ip_off = IP_DF; -#endif -#elif defined(WITH_CONVERT_IP_OFF) || defined(__APPLE__) || defined(__Userspace_os_Darwin) +#elif defined(WITH_CONVERT_IP_OFF) || defined(__APPLE__) ip->ip_off = IP_DF; #else ip->ip_off = htons(IP_DF); #endif -#if defined(__FreeBSD__) +#if defined(__Userspace__) + ip->ip_id = htons(ip_id++); +#elif defined(__FreeBSD__) ip_fillid(ip); #elif defined(__APPLE__) #if RANDOM_IP_ID @@ -11786,8 +11774,6 @@ sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst, #else ip->ip_id = htons(ip_id++); #endif -#elif defined(__Userspace__) - ip->ip_id = htons(ip_id++); #else ip->ip_id = ip_id++; #endif @@ -11810,7 
+11796,7 @@ sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst, dst_sin6 = (struct sockaddr_in6 *)dst; ip6 = mtod(mout, struct ip6_hdr *); ip6->ip6_flow = htonl(0x60000000); -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) if (V_ip6_auto_flowlabel) { ip6->ip6_flow |= (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK); } @@ -11884,16 +11870,13 @@ sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst, switch (dst->sa_family) { #ifdef INET case AF_INET: -#if defined(__APPLE__) || defined(__Panda__) +#if defined(__APPLE__) && !defined(__Userspace__) /* zap the stack pointer to the route */ memset(&ro, 0, sizeof(sctp_route_t)); -#if defined(__Panda__) - ro._l_addr.sa.sa_family = AF_INET; -#endif #endif if (port) { -#if !defined(__Windows__) && !defined(__Userspace__) -#if defined(__FreeBSD__) && ((__FreeBSD_version > 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000) +#if !defined(_WIN32) && !defined(__Userspace__) +#if defined(__FreeBSD__) if (V_udp_cksum) { udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP)); } else { @@ -11906,12 +11889,8 @@ sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst, udp->uh_sum = 0; #endif } -#if defined(__FreeBSD__) -#if __FreeBSD_version >= 1000000 +#if defined(__FreeBSD__) && !defined(__Userspace__) ip->ip_len = htons(len); -#else - ip->ip_len = len; -#endif #elif defined(__APPLE__) || defined(__Userspace__) ip->ip_len = len; #else @@ -11920,8 +11899,8 @@ sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst, if (port) { shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip) + sizeof(struct udphdr)); SCTP_STAT_INCR(sctps_sendswcrc); -#if !defined(__Windows__) && !defined(__Userspace__) -#if defined(__FreeBSD__) && ((__FreeBSD_version > 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000) +#if !defined(_WIN32) && !defined(__Userspace__) +#if defined(__FreeBSD__) if (V_udp_cksum) { SCTP_ENABLE_UDP_CSUM(o_pak); } @@ -11930,7 +11909,7 @@ sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst, #endif #endif } else { -#if defined(__FreeBSD__) && __FreeBSD_version >= 800000 +#if defined(__FreeBSD__) && !defined(__Userspace__) mout->m_pkthdr.csum_flags = CSUM_SCTP; mout->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum); SCTP_STAT_INCR(sctps_sendhwcrc); @@ -11944,7 +11923,7 @@ sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst, sctp_packet_log(o_pak); } #endif -#if defined(__APPLE__) || defined(__Panda__) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_IP_OUTPUT(ret, o_pak, &ro, NULL, vrf_id); /* Free the route if we got one back */ if (ro.ro_rt) { @@ -11952,7 +11931,7 @@ sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst, ro.ro_rt = NULL; } #else -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) SCTP_PROBE5(send, NULL, NULL, ip, NULL, shout); #endif SCTP_IP_OUTPUT(ret, o_pak, NULL, NULL, vrf_id); @@ -11965,20 +11944,18 @@ sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst, if (port) { shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr)); SCTP_STAT_INCR(sctps_sendswcrc); -#if defined(__Windows__) +#if !defined(__Userspace__) +#if defined(_WIN32) udp->uh_sum = 0; -#elif !defined(__Userspace__) +#else if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) { udp->uh_sum = 0xffff; } #endif - } else { -#if defined(__FreeBSD__) && 
__FreeBSD_version >= 900000 -#if __FreeBSD_version > 901000 - mout->m_pkthdr.csum_flags = CSUM_SCTP_IPV6; -#else - mout->m_pkthdr.csum_flags = CSUM_SCTP; #endif + } else { +#if defined(__FreeBSD__) && !defined(__Userspace__) + mout->m_pkthdr.csum_flags = CSUM_SCTP_IPV6; mout->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum); SCTP_STAT_INCR(sctps_sendhwcrc); #else @@ -11991,7 +11968,7 @@ sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst, sctp_packet_log(o_pak); } #endif -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) SCTP_PROBE5(send, NULL, NULL, ip6, NULL, shout); #endif SCTP_IP6_OUTPUT(ret, o_pak, NULL, NULL, NULL, vrf_id); @@ -12035,7 +12012,7 @@ sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst, return; } SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret); -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) if (port) { UDPSTAT_INC(udps_opackets); } @@ -12052,24 +12029,20 @@ sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst, void sctp_send_shutdown_complete2(struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum, #endif uint32_t vrf_id, uint16_t port) { sctp_send_resp_msg(src, dst, sh, 0, SCTP_SHUTDOWN_COMPLETE, NULL, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) mflowtype, mflowid, fibnum, #endif vrf_id, port); } void -sctp_send_hb(struct sctp_tcb *stcb, struct sctp_nets *net,int so_locked -#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) - SCTP_UNUSED -#endif -) +sctp_send_hb(struct sctp_tcb *stcb, struct sctp_nets *net,int so_locked) { struct sctp_tmit_chunk *chk; struct sctp_heartbeat_chunk *hb; @@ -12130,7 +12103,7 @@ sctp_send_hb(struct sctp_tcb *stcb, struct sctp_nets *net,int so_locked /* Fill out hb parameter */ hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO); hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param)); - hb->heartbeat.hb_info.time_value_1 = now.tv_sec; + hb->heartbeat.hb_info.time_value_1 = (uint32_t)now.tv_sec; hb->heartbeat.hb_info.time_value_2 = now.tv_usec; /* Did our user request this one, put it in */ hb->heartbeat.hb_info.addr_family = (uint8_t)net->ro._l_addr.sa.sa_family; @@ -12998,21 +12971,29 @@ sctp_send_str_reset_req(struct sctp_tcb *stcb, stcb->asoc.ss_functions.sctp_ss_clear(stcb, &stcb->asoc, 0, 1); for (i = 0; i < stcb->asoc.streamoutcnt; i++) { TAILQ_INIT(&stcb->asoc.strmout[i].outqueue); + /* FIX ME FIX ME */ + /* This should be a SS_COPY operation FIX ME STREAM SCHEDULER EXPERT */ + stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], &oldstream[i]); stcb->asoc.strmout[i].chunks_on_queues = oldstream[i].chunks_on_queues; +#if defined(SCTP_DETAILED_STR_STATS) + for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) { + stcb->asoc.strmout[i].abandoned_sent[j] = oldstream[i].abandoned_sent[j]; + stcb->asoc.strmout[i].abandoned_unsent[j] = oldstream[i].abandoned_unsent[j]; + } +#else + stcb->asoc.strmout[i].abandoned_sent[0] = oldstream[i].abandoned_sent[0]; + stcb->asoc.strmout[i].abandoned_unsent[0] = oldstream[i].abandoned_unsent[0]; +#endif stcb->asoc.strmout[i].next_mid_ordered = oldstream[i].next_mid_ordered; stcb->asoc.strmout[i].next_mid_unordered = oldstream[i].next_mid_unordered; stcb->asoc.strmout[i].last_msg_incomplete = oldstream[i].last_msg_incomplete; stcb->asoc.strmout[i].sid = i; 
stcb->asoc.strmout[i].state = oldstream[i].state; - /* FIX ME FIX ME */ - /* This should be a SS_COPY operation FIX ME STREAM SCHEDULER EXPERT */ - stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], &oldstream[i]); /* now anything on those queues? */ TAILQ_FOREACH_SAFE(sp, &oldstream[i].outqueue, next, nsp) { TAILQ_REMOVE(&oldstream[i].outqueue, sp, next); TAILQ_INSERT_TAIL(&stcb->asoc.strmout[i].outqueue, sp, next); } - } /* now the new streams */ stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc, 1); @@ -13077,7 +13058,7 @@ skip_stuff: void sctp_send_abort(struct mbuf *m, int iphlen, struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh, uint32_t vtag, struct mbuf *cause, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum, #endif uint32_t vrf_id, uint16_t port) @@ -13089,7 +13070,7 @@ sctp_send_abort(struct mbuf *m, int iphlen, struct sockaddr *src, struct sockadd return; } sctp_send_resp_msg(src, dst, sh, vtag, SCTP_ABORT_ASSOCIATION, cause, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) mflowtype, mflowid, fibnum, #endif vrf_id, port); @@ -13099,13 +13080,13 @@ sctp_send_abort(struct mbuf *m, int iphlen, struct sockaddr *src, struct sockadd void sctp_send_operr_to(struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh, uint32_t vtag, struct mbuf *cause, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum, #endif uint32_t vrf_id, uint16_t port) { sctp_send_resp_msg(src, dst, sh, vtag, SCTP_OPERATION_ERROR, cause, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) mflowtype, mflowid, fibnum, #endif vrf_id, port); @@ -13115,27 +13096,14 @@ sctp_send_operr_to(struct sockaddr *src, struct sockaddr *dst, static struct mbuf * sctp_copy_resume(struct uio *uio, int max_send_len, -#if (defined(__FreeBSD__) && __FreeBSD_version > 602000) || defined(__Userspace__) +#if defined(__FreeBSD__) || defined(__Userspace__) int user_marks_eor, #endif int *error, uint32_t *sndout, struct mbuf **new_tail) { -#if defined(__Panda__) - struct mbuf *m; - - m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0, - (user_marks_eor ? 
M_EOR : 0)); - if (m == NULL) { - SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS); - *error = ENOBUFS; - } else { - *sndout = m_length(m, NULL); - *new_tail = m_last(m); - } - return (m); -#elif defined(__FreeBSD__) && __FreeBSD_version > 602000 || defined(__Userspace__) +#if defined(__FreeBSD__) || defined(__Userspace__) struct mbuf *m; m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0, @@ -13152,7 +13120,7 @@ sctp_copy_resume(struct uio *uio, int left, cancpy, willcpy; struct mbuf *m, *head; -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) #if defined(APPLE_LEOPARD) left = (int)min(uio->uio_resid, max_send_len); #else @@ -13218,17 +13186,7 @@ sctp_copy_one(struct sctp_stream_queue_pending *sp, struct uio *uio, int resv_upfront) { -#if defined(__Panda__) - sp->data = m_uiotombuf(uio, M_WAITOK, sp->length, - resv_upfront, 0); - if (sp->data == NULL) { - SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS); - return (ENOBUFS); - } - - sp->tail_mbuf = m_last(sp->data); - return (0); -#elif defined(__FreeBSD__) && __FreeBSD_version > 602000 || defined(__Userspace__) +#if defined(__FreeBSD__) || defined(__Userspace__) sp->data = m_uiotombuf(uio, M_WAITOK, sp->length, resv_upfront, 0); if (sp->data == NULL) { @@ -13293,8 +13251,6 @@ sctp_copy_one(struct sctp_stream_queue_pending *sp, #endif } - - static struct sctp_stream_queue_pending * sctp_copy_it_in(struct sctp_tcb *stcb, struct sctp_association *asoc, @@ -13343,7 +13299,7 @@ sctp_copy_it_in(struct sctp_tcb *stcb, (void)SCTP_GETTIME_TIMEVAL(&sp->ts); sp->sid = srcv->sinfo_stream; -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) #if defined(APPLE_LEOPARD) sp->length = (uint32_t)min(uio->uio_resid, max_send_len); #else @@ -13352,7 +13308,7 @@ sctp_copy_it_in(struct sctp_tcb *stcb, #else sp->length = (uint32_t)min(uio->uio_resid, max_send_len); #endif -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) #if defined(APPLE_LEOPARD) if ((sp->length == (uint32_t)uio->uio_resid) && #else @@ -13385,11 +13341,11 @@ sctp_copy_it_in(struct sctp_tcb *stcb, sctp_auth_key_acquire(stcb, sp->auth_keyid); sp->holds_key_ref = 1; } -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_SOCKET_UNLOCK(SCTP_INP_SO(stcb->sctp_ep), 0); #endif *error = sctp_copy_one(sp, uio, resv_in_first); -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_SOCKET_LOCK(SCTP_INP_SO(stcb->sctp_ep), 0); #endif skip_copy: @@ -13415,25 +13371,19 @@ out_now: return (sp); } - int sctp_sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, -#ifdef __Panda__ - pakhandle_type top, - pakhandle_type icontrol, -#else struct mbuf *top, struct mbuf *control, -#endif -#if defined(__APPLE__) || defined(__Panda__) +#if defined(__APPLE__) && !defined(__Userspace__) int flags #else int flags, -#if defined(__FreeBSD__) && __FreeBSD_version >= 500000 +#if defined(__FreeBSD__) && !defined(__Userspace__) struct thread *p -#elif defined(__Windows__) +#elif defined(_WIN32) && !defined(__Userspace__) PKTHREAD p #else #if defined(__Userspace__) @@ -13447,10 +13397,7 @@ sctp_sosend(struct socket *so, #endif ) { -#ifdef __Panda__ - struct mbuf *control = NULL; -#endif -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) struct proc *p = current_proc(); #endif int error, use_sndinfo = 0; @@ -13460,12 +13407,6 @@ sctp_sosend(struct socket *so, struct sockaddr_in sin; #endif -#if defined(__APPLE__) - SCTP_SOCKET_LOCK(so, 
1); -#endif -#ifdef __Panda__ - control = SCTP_HEADER_TO_CHAIN(icontrol); -#endif if (control) { /* process cmsg snd/rcv info (maybe a assoc-id) */ if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&sndrcvninfo, control, @@ -13476,9 +13417,15 @@ sctp_sosend(struct socket *so, } addr_to_use = addr; #if defined(INET) && defined(INET6) - if ((addr) && (addr->sa_family == AF_INET6)) { + if ((addr != NULL) && (addr->sa_family == AF_INET6)) { struct sockaddr_in6 *sin6; +#ifdef HAVE_SA_LEN + if (addr->sa_len != sizeof(struct sockaddr_in6)) { + SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL); + return (EINVAL); + } +#endif sin6 = (struct sockaddr_in6 *)addr; if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { in6_sin6_2_sin(&sin, sin6); @@ -13486,43 +13433,36 @@ sctp_sosend(struct socket *so, } } #endif - error = sctp_lower_sosend(so, addr_to_use, uio, top, -#ifdef __Panda__ - icontrol, -#else - control, +#if defined(__APPLE__) && !defined(__Userspace__) + SCTP_SOCKET_LOCK(so, 1); #endif + error = sctp_lower_sosend(so, addr_to_use, uio, top, + control, flags, use_sndinfo ? &sndrcvninfo: NULL -#if !(defined(__Panda__) || defined(__Userspace__)) +#if !defined(__Userspace__) , p #endif ); -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_SOCKET_UNLOCK(so, 1); #endif return (error); } - int sctp_lower_sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, -#ifdef __Panda__ - pakhandle_type i_pak, - pakhandle_type i_control, -#else struct mbuf *i_pak, struct mbuf *control, -#endif int flags, struct sctp_sndrcvinfo *srcv -#if !(defined( __Panda__) || defined(__Userspace__)) +#if !defined(__Userspace__) , -#if defined(__FreeBSD__) && __FreeBSD_version >= 500000 +#if defined(__FreeBSD__) struct thread *p -#elif defined(__Windows__) +#elif defined(_WIN32) PKTHREAD p #else struct proc *p @@ -13530,12 +13470,12 @@ sctp_lower_sosend(struct socket *so, #endif ) { - ssize_t sndlen = 0, max_len, local_add_more; - int error, len; - struct mbuf *top = NULL; -#ifdef __Panda__ - struct mbuf *control = NULL; +#if defined(__FreeBSD__) && !defined(__Userspace__) + struct epoch_tracker et; #endif + ssize_t sndlen = 0, max_len, local_add_more; + int error; + struct mbuf *top = NULL; int queue_only = 0, queue_only_for_init = 0; int free_cnt_applied = 0; int un_sent; @@ -13565,7 +13505,7 @@ sctp_lower_sosend(struct socket *so, stcb = NULL; asoc = NULL; -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) sctp_lock_assert(so); #endif t_inp = inp = (struct sctp_inpcb *)so->so_pcb; @@ -13584,7 +13524,7 @@ sctp_lower_sosend(struct socket *so, user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); atomic_add_int(&inp->total_sends, 1); if (uio) { -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) #if defined(APPLE_LEOPARD) if (uio->uio_resid < 0) { #else @@ -13596,7 +13536,7 @@ sctp_lower_sosend(struct socket *so, SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); return (EINVAL); } -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) #if defined(APPLE_LEOPARD) sndlen = uio->uio_resid; #else @@ -13607,39 +13547,11 @@ sctp_lower_sosend(struct socket *so, #endif } else { top = SCTP_HEADER_TO_CHAIN(i_pak); -#ifdef __Panda__ - /*- - * app len indicates the datalen, dgsize for cases - * of SCTP_EOF/ABORT will not have the right len - */ - sndlen = SCTP_APP_DATA_LEN(i_pak); - /*- - * Set the particle len also to zero to match - * up with app len. 
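/*
 * A small, hedged sketch of what the sctp_sosend() hunk above does with the
 * destination address: validate the sockaddr_in6 length (on platforms that
 * define sa_len) and convert an IPv4-mapped IPv6 address to a plain
 * sockaddr_in before handing it to sctp_lower_sosend().  This is not the
 * stack's in6_sin6_2_sin(); names and error handling are simplified.
 */
#include <netinet/in.h>
#include <string.h>

static int
my_map_v4mapped(const struct sockaddr_in6 *sin6, struct sockaddr_in *sin)
{
	if (sin6->sin6_family != AF_INET6 ||
	    !IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
		return (-1);            /* caller keeps using the IPv6 form */
	}
	memset(sin, 0, sizeof(*sin));
	sin->sin_family = AF_INET;
	sin->sin_port = sin6->sin6_port;
	/* the IPv4 address lives in the last four bytes of the mapped form */
	memcpy(&sin->sin_addr, &sin6->sin6_addr.s6_addr[12],
	    sizeof(sin->sin_addr));
	return (0);
}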
We only have one particle - * if app len is zero for Panda. This is ensured - * in the socket lib - */ - if (sndlen == 0) { - SCTP_BUF_LEN(top) = 0; - } - /*- - * We delink the chain from header, but keep - * the header around as we will need it in - * EAGAIN case - */ - SCTP_DETACH_HEADER_FROM_CHAIN(i_pak); -#else sndlen = SCTP_HEADER_LEN(i_pak); -#endif } - SCTPDBG(SCTP_DEBUG_OUTPUT1, "Send called addr:%p send length %zu\n", - (void *)addr, + SCTPDBG(SCTP_DEBUG_OUTPUT1, "Send called addr:%p send length %zd\n", + (void *)addr, sndlen); -#ifdef __Panda__ - if (i_control) { - control = SCTP_HEADER_TO_CHAIN(i_control); - } -#endif if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && SCTP_IS_LISTENING(inp)) { /* The listener can NOT send */ @@ -13713,7 +13625,7 @@ sctp_lower_sosend(struct socket *so, sinfo_flags = inp->def_send.sinfo_flags; sinfo_assoc_id = inp->def_send.sinfo_assoc_id; } -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) if (flags & MSG_EOR) { sinfo_flags |= SCTP_EOR; } @@ -13775,7 +13687,6 @@ sctp_lower_sosend(struct socket *so, SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL); error = EINVAL; goto out_unlocked; - } if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) && (addr->sa_family == AF_INET6)) { @@ -13821,8 +13732,8 @@ sctp_lower_sosend(struct socket *so, if ((sinfo_flags & SCTP_ABORT) || ((sinfo_flags & SCTP_EOF) && (sndlen == 0))) { /*- - * User asks to abort a non-existant assoc, - * or EOF a non-existant assoc with no data + * User asks to abort a non-existent assoc, + * or EOF a non-existent assoc with no data */ SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT); error = ENOENT; @@ -13835,24 +13746,19 @@ sctp_lower_sosend(struct socket *so, panic("Error, should hold create lock and I don't?"); } #endif - stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id, - inp->sctp_ep.pre_open_stream_count, - inp->sctp_ep.port, -#if !(defined( __Panda__) || defined(__Userspace__)) - p, + stcb = sctp_aloc_assoc_connected(inp, addr, &error, 0, 0, vrf_id, + inp->sctp_ep.pre_open_stream_count, + inp->sctp_ep.port, +#if !defined(__Userspace__) + p, #else - (struct proc *)NULL, + (struct proc *)NULL, #endif - SCTP_INITIALIZE_AUTH_PARAMS); + SCTP_INITIALIZE_AUTH_PARAMS); if (stcb == NULL) { /* Error is setup for us in the call */ goto out_unlocked; } - if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { - stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED; - /* Set the connected flag so we can queue data */ - soisconnecting(so); - } hold_tcblock = 1; if (create_lock_applied) { SCTP_ASOC_CREATE_UNLOCK(inp); @@ -13868,8 +13774,8 @@ sctp_lower_sosend(struct socket *so, if (control) { if (sctp_process_cmsgs_for_init(stcb, control, &error)) { - sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE, - SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5); + sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, + SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_6); hold_tcblock = 0; stcb = NULL; goto out_unlocked; @@ -13889,7 +13795,7 @@ sctp_lower_sosend(struct socket *so, if (srcv == NULL) { srcv = (struct sctp_sndrcvinfo *)&asoc->def_send; sinfo_flags = srcv->sinfo_flags; -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) if (flags & MSG_EOR) { sinfo_flags |= SCTP_EOR; } @@ -13934,7 +13840,7 @@ sctp_lower_sosend(struct socket *so, } #endif if (SCTP_SO_IS_NBIO(so) -#if defined(__FreeBSD__) && __FreeBSD_version >= 500000 +#if defined(__FreeBSD__) && !defined(__Userspace__) || (flags & (MSG_NBIO | MSG_DONTWAIT)) != 0 #endif ) { @@ -13967,7 
+13873,7 @@ sctp_lower_sosend(struct socket *so, SCTP_TCB_UNLOCK(stcb); hold_tcblock = 0; } else { - atomic_add_int(&stcb->asoc.sb_send_resv, sndlen); + atomic_add_int(&stcb->asoc.sb_send_resv, (int)sndlen); } local_soresv = sndlen; if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { @@ -14021,12 +13927,10 @@ sctp_lower_sosend(struct socket *so, } } /* Ok, we will attempt a msgsnd :> */ -#if !(defined(__Panda__) || defined(__Windows__) || defined(__Userspace__)) +#if !(defined(_WIN32) || defined(__Userspace__)) if (p) { -#if defined(__FreeBSD__) && __FreeBSD_version >= 603000 +#if defined(__FreeBSD__) p->td_ru.ru_msgsnd++; -#elif defined(__FreeBSD__) && __FreeBSD_version >= 500000 - p->td_proc->p_stats->p_ru.ru_msgsnd++; #else p->p_stats->p_ru.ru_msgsnd++; #endif @@ -14091,11 +13995,11 @@ sctp_lower_sosend(struct socket *so, ph++; SCTP_BUF_LEN(mm) = (int)(tot_out + sizeof(struct sctp_paramhdr)); if (top == NULL) { -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_SOCKET_UNLOCK(so, 0); #endif error = uiomove((caddr_t)ph, (int)tot_out, uio); -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_SOCKET_LOCK(so, 0); #endif if (error) { @@ -14119,7 +14023,13 @@ sctp_lower_sosend(struct socket *so, atomic_add_int(&stcb->asoc.refcnt, -1); free_cnt_applied = 0; /* release this lock, otherwise we hang on ourselves */ - sctp_abort_an_association(stcb->sctp_ep, stcb, mm, SCTP_SO_LOCKED); +#if defined(__FreeBSD__) && !defined(__Userspace__) + NET_EPOCH_ENTER(et); +#endif + sctp_abort_an_association(stcb->sctp_ep, stcb, mm, false, SCTP_SO_LOCKED); +#if defined(__FreeBSD__) && !defined(__Userspace__) + NET_EPOCH_EXIT(et); +#endif /* now relock the stcb so everything is sane */ hold_tcblock = 0; stcb = NULL; @@ -14178,7 +14088,6 @@ sctp_lower_sosend(struct socket *so, */ local_add_more = sndlen; } - len = 0; if (non_blocking) { goto skip_preblock; } @@ -14202,7 +14111,7 @@ sctp_lower_sosend(struct socket *so, sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA, asoc, sndlen); } be.error = 0; -#if !defined(__Panda__) && !defined(__Windows__) +#if !(defined(_WIN32) && !defined(__Userspace__)) stcb->block_entry = &be; #endif error = sbwait(&so->so_snd); @@ -14240,7 +14149,7 @@ skip_preblock: if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { goto out_unlocked; } -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) error = sblock(&so->so_snd, SBLOCKWAIT(flags)); #endif /* sndlen covers for mbuf case @@ -14270,11 +14179,10 @@ skip_preblock: error = EINVAL; goto out; } - SCTP_TCB_SEND_UNLOCK(stcb); - strm = &stcb->asoc.strmout[srcv->sinfo_stream]; if (strm->last_msg_incomplete == 0) { do_a_copy_in: + SCTP_TCB_SEND_UNLOCK(stcb); sp = sctp_copy_it_in(stcb, asoc, srcv, uio, net, max_len, user_marks_eor, &error); if (error) { goto out; @@ -14299,13 +14207,11 @@ skip_preblock: if (sinfo_flags & SCTP_UNORDERED) { SCTP_STAT_INCR(sctps_sends_with_unord); } + sp->processing = 1; TAILQ_INSERT_TAIL(&strm->outqueue, sp, next); stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, asoc, strm, sp, 1); - SCTP_TCB_SEND_UNLOCK(stcb); } else { - SCTP_TCB_SEND_LOCK(stcb); sp = TAILQ_LAST(&strm->outqueue, sctp_streamhead); - SCTP_TCB_SEND_UNLOCK(stcb); if (sp == NULL) { /* ???? Huh ??? 
last msg is gone */ #ifdef INVARIANTS @@ -14315,10 +14221,18 @@ skip_preblock: strm->last_msg_incomplete = 0; #endif goto do_a_copy_in; - + } + if (sp->processing) { + SCTP_TCB_SEND_UNLOCK(stcb); + SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); + error = EINVAL; + goto out; + } else { + sp->processing = 1; } } -#if defined(__APPLE__) + SCTP_TCB_SEND_UNLOCK(stcb); +#if defined(__APPLE__) && !defined(__Userspace__) #if defined(APPLE_LEOPARD) while (uio->uio_resid > 0) { #else @@ -14338,7 +14252,7 @@ skip_preblock: if ((max_len > (ssize_t)SCTP_BASE_SYSCTL(sctp_add_more_threshold)) || (max_len && (SCTP_SB_LIMIT_SND(so) < SCTP_BASE_SYSCTL(sctp_add_more_threshold))) || -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) #if defined(APPLE_LEOPARD) (uio->uio_resid && (uio->uio_resid <= max_len))) { #else @@ -14353,31 +14267,39 @@ skip_preblock: SCTP_TCB_UNLOCK(stcb); hold_tcblock = 0; } -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_SOCKET_UNLOCK(so, 0); #endif -#if (defined(__FreeBSD__) && __FreeBSD_version > 602000) || defined(__Userspace__) +#if defined(__FreeBSD__) || defined(__Userspace__) mm = sctp_copy_resume(uio, (int)max_len, user_marks_eor, &error, &sndout, &new_tail); #else mm = sctp_copy_resume(uio, (int)max_len, &error, &sndout, &new_tail); #endif -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_SOCKET_LOCK(so, 0); #endif if ((mm == NULL) || error) { if (mm) { sctp_m_freem(mm); } + SCTP_TCB_SEND_LOCK(stcb); + if (((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0) && + ((stcb->asoc.state & SCTP_STATE_WAS_ABORTED) == 0) && + (sp != NULL)) { + sp->processing = 0; + } + SCTP_TCB_SEND_UNLOCK(stcb); goto out; } /* Update the mbuf and count */ SCTP_TCB_SEND_LOCK(stcb); - if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { + if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) || + (stcb->asoc.state & SCTP_STATE_WAS_ABORTED)) { /* we need to get out. * Peer probably aborted. */ sctp_m_freem(mm); - if (stcb->asoc.state & SCTP_PCB_FLAGS_WAS_ABORTED) { + if (stcb->asoc.state & SCTP_STATE_WAS_ABORTED) { SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET); error = ECONNRESET; } @@ -14395,13 +14317,12 @@ skip_preblock: } sctp_snd_sb_alloc(stcb, sndout); atomic_add_int(&sp->length, sndout); - len += sndout; if (sinfo_flags & SCTP_SACK_IMMEDIATELY) { sp->sinfo_flags |= SCTP_SACK_IMMEDIATELY; } /* Did we reach EOR? */ -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) #if defined(APPLE_LEOPARD) if ((uio->uio_resid == 0) && #else @@ -14419,7 +14340,7 @@ skip_preblock: } SCTP_TCB_SEND_UNLOCK(stcb); } -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) #if defined(APPLE_LEOPARD) if (uio->uio_resid == 0) { #else @@ -14453,6 +14374,11 @@ skip_preblock: /* wait for space now */ if (non_blocking) { /* Non-blocking io in place out */ + SCTP_TCB_SEND_LOCK(stcb); + if (sp != NULL) { + sp->processing = 0; + } + SCTP_TCB_SEND_UNLOCK(stcb); goto skip_out_eof; } /* What about the INIT, send it maybe */ @@ -14465,7 +14391,13 @@ skip_preblock: /* a collision took us forward? 
*/ queue_only = 0; } else { +#if defined(__FreeBSD__) && !defined(__Userspace__) + NET_EPOCH_ENTER(et); +#endif sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED); +#if defined(__FreeBSD__) && !defined(__Userspace__) + NET_EPOCH_EXIT(et); +#endif SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT); queue_only = 1; } @@ -14486,7 +14418,6 @@ skip_preblock: (stcb->asoc.total_flight > 0) && (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) && (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) { - /*- * Ok, Nagle is set on and we have data outstanding. * Don't send anything and let SACKs drive out the @@ -14506,7 +14437,6 @@ skip_preblock: nagle_applies = 0; } if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) { - sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only, nagle_applies, un_sent); sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size, @@ -14523,6 +14453,9 @@ skip_preblock: * the input via the net is happening * and I don't need to start output :-D */ +#if defined(__FreeBSD__) && !defined(__Userspace__) + NET_EPOCH_ENTER(et); +#endif if (hold_tcblock == 0) { if (SCTP_TCB_TRYLOCK(stcb)) { hold_tcblock = 1; @@ -14535,6 +14468,9 @@ skip_preblock: stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED); } +#if defined(__FreeBSD__) && !defined(__Userspace__) + NET_EPOCH_EXIT(et); +#endif } if (hold_tcblock == 1) { SCTP_TCB_UNLOCK(stcb); @@ -14559,7 +14495,7 @@ skip_preblock: if (SCTP_SB_LIMIT_SND(so) <= (inqueue_bytes + min(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTP_SB_LIMIT_SND(so)))) { if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) { -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) #if defined(APPLE_LEOPARD) sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK, asoc, uio->uio_resid); @@ -14573,10 +14509,10 @@ skip_preblock: #endif } be.error = 0; -#if !defined(__Panda__) && !defined(__Windows__) +#if !(defined(_WIN32) && !defined(__Userspace__)) stcb->block_entry = &be; #endif -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) sbunlock(&so->so_snd, 1); #endif error = sbwait(&so->so_snd); @@ -14591,10 +14527,17 @@ skip_preblock: } } SOCKBUF_UNLOCK(&so->so_snd); + SCTP_TCB_SEND_LOCK(stcb); + if (((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0) && + ((stcb->asoc.state & SCTP_STATE_WAS_ABORTED) == 0) && + (sp != NULL)) { + sp->processing = 0; + } + SCTP_TCB_SEND_UNLOCK(stcb); goto out_unlocked; } -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) error = sblock(&so->so_snd, SBLOCKWAIT(flags)); #endif if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) { @@ -14603,12 +14546,17 @@ skip_preblock: } } SOCKBUF_UNLOCK(&so->so_snd); - if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { + SCTP_TCB_SEND_LOCK(stcb); + if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) || + (stcb->asoc.state & SCTP_STATE_WAS_ABORTED)) { + SCTP_TCB_SEND_UNLOCK(stcb); goto out_unlocked; } + SCTP_TCB_SEND_UNLOCK(stcb); } SCTP_TCB_SEND_LOCK(stcb); - if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { + if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) || + (stcb->asoc.state & SCTP_STATE_WAS_ABORTED)) { SCTP_TCB_SEND_UNLOCK(stcb); goto out_unlocked; } @@ -14624,13 +14572,14 @@ skip_preblock: strm->last_msg_incomplete = 0; asoc->stream_locked = 0; } + sp->processing = 0; } else { SCTP_PRINTF("Huh no sp TSNH?\n"); strm->last_msg_incomplete = 0; asoc->stream_locked = 0; } SCTP_TCB_SEND_UNLOCK(stcb); -#if defined(__APPLE__) +#if defined(__APPLE__) && 
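/*
 * The hunks above introduce a "processing" flag on the pending stream-queue
 * entry: it is set under the send lock before a writer appends to the tail of
 * an incomplete message, a second writer that finds it set fails with EINVAL
 * instead of racing, and it is cleared again on the normal and error exit
 * paths.  Below is a hedged, self-contained sketch of that claim/release idea
 * using a pthread mutex; the names are illustrative stand-ins, not usrsctp API.
 */
#include <errno.h>
#include <pthread.h>

struct my_pending_msg {
	int processing;                 /* 1 while one writer owns the tail */
};

static pthread_mutex_t my_send_lock = PTHREAD_MUTEX_INITIALIZER;

static int
my_claim_tail(struct my_pending_msg *sp)
{
	pthread_mutex_lock(&my_send_lock);
	if (sp->processing) {
		pthread_mutex_unlock(&my_send_lock);
		return (EINVAL);        /* someone else is appending */
	}
	sp->processing = 1;
	pthread_mutex_unlock(&my_send_lock);
	return (0);
}

static void
my_release_tail(struct my_pending_msg *sp)
{
	pthread_mutex_lock(&my_send_lock);
	sp->processing = 0;             /* always cleared, even on error */
	pthread_mutex_unlock(&my_send_lock);
}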
!defined(__Userspace__) #if defined(APPLE_LEOPARD) if (uio->uio_resid == 0) { #else @@ -14646,12 +14595,6 @@ skip_preblock: error = sctp_msg_append(stcb, net, top, srcv, 0); top = NULL; if (sinfo_flags & SCTP_EOF) { - /* - * This should only happen for Panda for the mbuf - * send case, which does NOT yet support EEOR mode. - * Thus, we can just set this flag to do the proper - * EOF handling. - */ got_all_of_the_send = 1; } } @@ -14695,7 +14638,7 @@ dataless_eof: sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, netp); sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, - asoc->primary_destination); + NULL); } } else { /*- @@ -14730,19 +14673,25 @@ dataless_eof: atomic_add_int(&stcb->asoc.refcnt, -1); free_cnt_applied = 0; } - snprintf(msg, sizeof(msg), - "%s:%d at %s", __FILE__, __LINE__, __func__); + SCTP_SNPRINTF(msg, sizeof(msg), + "%s:%d at %s", __FILE__, __LINE__, __func__); op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), msg); +#if defined(__FreeBSD__) && !defined(__Userspace__) + NET_EPOCH_ENTER(et); +#endif sctp_abort_an_association(stcb->sctp_ep, stcb, - op_err, SCTP_SO_LOCKED); + op_err, false, SCTP_SO_LOCKED); +#if defined(__FreeBSD__) && !defined(__Userspace__) + NET_EPOCH_EXIT(et); +#endif /* now relock the stcb so everything is sane */ hold_tcblock = 0; stcb = NULL; goto out; } sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, - asoc->primary_destination); + NULL); sctp_feature_off(inp, SCTP_PCB_FLAGS_NODELAY); } } @@ -14760,7 +14709,13 @@ skip_out_eof: /* a collision took us forward? */ queue_only = 0; } else { +#if defined(__FreeBSD__) && !defined(__Userspace__) + NET_EPOCH_ENTER(et); +#endif sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED); +#if defined(__FreeBSD__) && !defined(__Userspace__) + NET_EPOCH_EXIT(et); +#endif SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT); queue_only = 1; } @@ -14806,6 +14761,9 @@ skip_out_eof: stcb->asoc.total_flight, stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count); } +#if defined(__FreeBSD__) && !defined(__Userspace__) + NET_EPOCH_ENTER(et); +#endif if ((queue_only == 0) && (nagle_applies == 0) && (stcb->asoc.peers_rwnd && un_sent)) { /* we can attempt to send too. 
*/ if (hold_tcblock == 0) { @@ -14838,19 +14796,22 @@ skip_out_eof: (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out, &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_LOCKED); } +#if defined(__FreeBSD__) && !defined(__Userspace__) + NET_EPOCH_EXIT(et); +#endif SCTPDBG(SCTP_DEBUG_OUTPUT1, "USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d err:%d\n", queue_only, stcb->asoc.peers_rwnd, un_sent, stcb->asoc.total_flight, stcb->asoc.chunks_on_out_queue, stcb->asoc.total_output_queue_size, error); out: -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) sbunlock(&so->so_snd, 1); #endif out_unlocked: if (local_soresv && stcb) { - atomic_subtract_int(&stcb->asoc.sb_send_resv, sndlen); + atomic_subtract_int(&stcb->asoc.sb_send_resv, (int)sndlen); } if (create_lock_applied) { SCTP_ASOC_CREATE_UNLOCK(inp); @@ -14862,7 +14823,7 @@ out_unlocked: atomic_add_int(&stcb->asoc.refcnt, -1); } #ifdef INVARIANTS -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) if (stcb) { if (mtx_owned(&stcb->tcb_mtx)) { panic("Leaving with tcb mtx owned?"); @@ -14872,33 +14833,6 @@ out_unlocked: } } #endif -#endif -#ifdef __Panda__ - /* - * Handle the EAGAIN/ENOMEM cases to reattach the pak header - * to particle when pak is passed in, so that caller - * can try again with this pak - * - * NOTE: For other cases, including success case, - * we simply want to return the header back to free - * pool - */ - if (top) { - if ((error == EAGAIN) || (error == ENOMEM)) { - SCTP_ATTACH_CHAIN(i_pak, top, sndlen); - top = NULL; - } else { - (void)SCTP_RELEASE_HEADER(i_pak); - } - } else { - /* This is to handle cases when top has - * been reset to NULL but pak might not - * be freed - */ - if (i_pak) { - (void)SCTP_RELEASE_HEADER(i_pak); - } - } #endif if (top) { sctp_m_freem(top); @@ -14909,7 +14843,6 @@ out_unlocked: return (error); } - /* * generate an AUTHentication chunk, if required */ @@ -14968,7 +14901,7 @@ sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end, return (m); } -#if defined(__FreeBSD__) || defined(__APPLE__) +#if (defined(__FreeBSD__) || defined(__APPLE__)) && !defined(__Userspace__) #ifdef INET6 int sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t *ro) @@ -14977,7 +14910,11 @@ sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t *ro) struct nd_pfxrouter *pfxrtr = NULL; struct sockaddr_in6 gw6; +#if defined(__FreeBSD__) + if (ro == NULL || ro->ro_nh == NULL || src6->sin6_family != AF_INET6) +#else if (ro == NULL || ro->ro_rt == NULL || src6->sin6_family != AF_INET6) +#endif return (0); /* get prefix entry of address */ @@ -15016,10 +14953,16 @@ sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t *ro) SCTPDBG(SCTP_DEBUG_OUTPUT2, "prefix router is "); SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&gw6); SCTPDBG(SCTP_DEBUG_OUTPUT2, "installed router is "); - SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway); - if (sctp_cmpaddr((struct sockaddr *)&gw6, ro->ro_rt->rt_gateway)) { #if defined(__FreeBSD__) + SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ro->ro_nh->gw_sa); +#else + SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway); +#endif +#if defined(__FreeBSD__) + if (sctp_cmpaddr((struct sockaddr *)&gw6, &ro->ro_nh->gw_sa)) { ND6_RUNLOCK(); +#else + if (sctp_cmpaddr((struct sockaddr *)&gw6, ro->ro_rt->rt_gateway)) { #endif SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is installed\n"); return (1); @@ -15041,7 +14984,11 @@ sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t *ro) struct ifaddr 
*ifa; struct in_addr srcnetaddr, gwnetaddr; +#if defined(__FreeBSD__) + if (ro == NULL || ro->ro_nh == NULL || +#else if (ro == NULL || ro->ro_rt == NULL || +#endif sifa->address.sa.sa_family != AF_INET) { return (0); } @@ -15053,10 +15000,18 @@ sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t *ro) SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa); SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", srcnetaddr.s_addr); +#if defined(__FreeBSD__) + sin = &ro->ro_nh->gw4_sa; +#else sin = (struct sockaddr_in *)ro->ro_rt->rt_gateway; +#endif gwnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr); SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: nexthop is "); +#if defined(__FreeBSD__) + SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ro->ro_nh->gw_sa); +#else SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway); +#endif SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", gwnetaddr.s_addr); if (srcnetaddr.s_addr == gwnetaddr.s_addr) { return (1); diff --git a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_output.h b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_output.h index cc5872725..dad10e00a 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_output.h +++ b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_output.h @@ -32,9 +32,9 @@ * THE POSSIBILITY OF SUCH DAMAGE. */ -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) #include -__FBSDID("$FreeBSD: head/sys/netinet/sctp_output.h 351654 2019-09-01 10:09:53Z tuexen $"); +__FBSDID("$FreeBSD: head/sys/netinet/sctp_output.h 366114 2020-09-24 12:26:06Z tuexen $"); #endif #ifndef _NETINET_SCTP_OUTPUT_H_ @@ -44,45 +44,37 @@ __FBSDID("$FreeBSD: head/sys/netinet/sctp_output.h 351654 2019-09-01 10:09:53Z t #if defined(_KERNEL) || defined(__Userspace__) - struct mbuf * sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_tcb *stcb, - struct sctp_scoping *scope, - struct mbuf *m_at, - int cnt_inits_to, - uint16_t *padding_len, uint16_t *chunk_len); - + struct sctp_scoping *scope, + struct mbuf *m_at, + int cnt_inits_to, + uint16_t *padding_len, uint16_t *chunk_len); int sctp_is_addr_restricted(struct sctp_tcb *, struct sctp_ifa *); - int sctp_is_address_in_scope(struct sctp_ifa *ifa, struct sctp_scoping *scope, - int do_update); + int do_update); int sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa); struct sctp_ifa * sctp_source_address_selection(struct sctp_inpcb *inp, - struct sctp_tcb *stcb, - sctp_route_t *ro, struct sctp_nets *net, - int non_asoc_addr_ok, uint32_t vrf_id); + struct sctp_tcb *stcb, + sctp_route_t *ro, struct sctp_nets *net, + int non_asoc_addr_ok, uint32_t vrf_id); #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Userspace__) -int -sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t *ro); -int -sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t *ro); +int sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t *ro); + +int sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t *ro); #endif -void sctp_send_initiate(struct sctp_inpcb *, struct sctp_tcb *, int -#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) - SCTP_UNUSED -#endif - ); +void sctp_send_initiate(struct sctp_inpcb *, struct sctp_tcb *, int); void sctp_send_initiate_ack(struct sctp_inpcb *, struct sctp_tcb *, @@ -90,31 +82,30 @@ sctp_send_initiate_ack(struct sctp_inpcb *, struct sctp_tcb *, int, int, struct sockaddr *, struct sockaddr *, struct sctphdr *, struct sctp_init_chunk *, -#if defined(__FreeBSD__) +#if 
defined(__FreeBSD__) && !defined(__Userspace__) uint8_t, uint32_t, #endif uint32_t, uint16_t); struct mbuf * sctp_arethere_unrecognized_parameters(struct mbuf *, int, int *, - struct sctp_chunkhdr *, int *, int *); + struct sctp_chunkhdr *, int *, int *); void sctp_queue_op_err(struct sctp_tcb *, struct mbuf *); int sctp_send_cookie_echo(struct mbuf *, int, int, struct sctp_tcb *, - struct sctp_nets *); + struct sctp_nets *); void sctp_send_cookie_ack(struct sctp_tcb *); void sctp_send_heartbeat_ack(struct sctp_tcb *, struct mbuf *, int, int, - struct sctp_nets *); + struct sctp_nets *); void sctp_remove_from_wheel(struct sctp_tcb *stcb, - struct sctp_association *asoc, - struct sctp_stream_out *strq, int holds_lock); - + struct sctp_association *asoc, + struct sctp_stream_out *strq, int holds_lock); void sctp_send_shutdown(struct sctp_tcb *, struct sctp_nets *); @@ -124,7 +115,7 @@ void sctp_send_shutdown_complete(struct sctp_tcb *, struct sctp_nets *, int); void sctp_send_shutdown_complete2(struct sockaddr *, struct sockaddr *, struct sctphdr *, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) uint8_t, uint32_t, uint16_t, #endif uint32_t, uint16_t); @@ -143,16 +134,15 @@ void sctp_fix_ecn_echo(struct sctp_association *); void sctp_move_chunks_from_net(struct sctp_tcb *stcb, struct sctp_nets *net); - #define SCTP_DATA_CHUNK_OVERHEAD(stcb) ((stcb)->asoc.idata_supported ? \ - sizeof(struct sctp_idata_chunk) : \ - sizeof(struct sctp_data_chunk)) + sizeof(struct sctp_idata_chunk) : \ + sizeof(struct sctp_data_chunk)) -#if defined(__FreeBSD__) && __FreeBSD_version >= 500000 +#if defined(__FreeBSD__) && !defined(__Userspace__) int sctp_output(struct sctp_inpcb *, struct mbuf *, struct sockaddr *, struct mbuf *, struct thread *, int); -#elif defined(__Windows__) +#elif defined(_WIN32) && !defined(__Userspace__) sctp_output(struct sctp_inpcb *, struct mbuf *, struct sockaddr *, struct mbuf *, PKTHREAD, int); #else @@ -161,30 +151,15 @@ sctp_output(struct sctp_inpcb *, struct mbuf *, struct sockaddr *, #endif int sctp_output(struct sctp_inpcb *, -#if defined(__Panda__) - pakhandle_type, -#else struct mbuf *, -#endif struct sockaddr *, -#if defined(__Panda__) - pakhandle_type, -#else struct mbuf *, -#endif struct proc *, int); #endif -void sctp_chunk_output(struct sctp_inpcb *, struct sctp_tcb *, int, int -#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) - SCTP_UNUSED -#endif - ); -void sctp_send_abort_tcb(struct sctp_tcb *, struct mbuf *, int -#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) - SCTP_UNUSED -#endif - ); +void sctp_chunk_output(struct sctp_inpcb *, struct sctp_tcb *, int, int); + +void sctp_send_abort_tcb(struct sctp_tcb *, struct mbuf *, int); void send_forward_tsn(struct sctp_tcb *, struct sctp_association *); @@ -194,23 +169,19 @@ void sctp_send_hb(struct sctp_tcb *, struct sctp_nets *, int); void sctp_send_ecn_echo(struct sctp_tcb *, struct sctp_nets *, uint32_t); - void sctp_send_packet_dropped(struct sctp_tcb *, struct sctp_nets *, struct mbuf *, int, int, int); - - void sctp_send_cwr(struct sctp_tcb *, struct sctp_nets *, uint32_t, uint8_t); - void sctp_add_stream_reset_result(struct sctp_tmit_chunk *, uint32_t, uint32_t); void sctp_send_deferred_reset_response(struct sctp_tcb *, - struct sctp_stream_reset_list *, - int); + struct sctp_stream_reset_list *, + int); void sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *, @@ -225,17 +196,18 @@ sctp_send_str_reset_req(struct sctp_tcb *, uint16_t , uint16_t *, void 
sctp_send_abort(struct mbuf *, int, struct sockaddr *, struct sockaddr *, struct sctphdr *, uint32_t, struct mbuf *, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) uint8_t, uint32_t, uint16_t, #endif uint32_t, uint16_t); -void sctp_send_operr_to(struct sockaddr *, struct sockaddr *, - struct sctphdr *, uint32_t, struct mbuf *, -#if defined(__FreeBSD__) - uint8_t, uint32_t, uint16_t, +void +sctp_send_operr_to(struct sockaddr *, struct sockaddr *, + struct sctphdr *, uint32_t, struct mbuf *, +#if defined(__FreeBSD__) && !defined(__Userspace__) + uint8_t, uint32_t, uint16_t, #endif - uint32_t, uint16_t); + uint32_t, uint16_t); #endif /* _KERNEL || __Userspace__ */ @@ -244,20 +216,15 @@ int sctp_sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, -#ifdef __Panda__ - pakhandle_type top, - pakhandle_type control, -#else struct mbuf *top, struct mbuf *control, -#endif -#if defined(__APPLE__) || defined(__Panda__) +#if defined(__APPLE__) && !defined(__Userspace__) int flags #else int flags, -#if defined(__FreeBSD__) && __FreeBSD_version >= 500000 +#if defined(__FreeBSD__) && !defined(__Userspace__) struct thread *p -#elif defined(__Windows__) +#elif defined(_WIN32) && !defined(__Userspace__) PKTHREAD p #else #if defined(__Userspace__) diff --git a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_pcb.c b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_pcb.c index 02779e536..d89f2232c 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_pcb.c +++ b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_pcb.c @@ -32,13 +32,13 @@ * THE POSSIBILITY OF SUCH DAMAGE. */ -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) #include -__FBSDID("$FreeBSD: head/sys/netinet/sctp_pcb.c 356377 2020-01-05 14:06:40Z tuexen $"); +__FBSDID("$FreeBSD$"); #endif #include -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) #include #endif #include @@ -52,7 +52,7 @@ __FBSDID("$FreeBSD: head/sys/netinet/sctp_pcb.c 356377 2020-01-05 14:06:40Z tuex #include #include #if defined(INET) || defined(INET6) -#if !defined(__Userspace_os_Windows) +#if !defined(_WIN32) #include #endif #endif @@ -63,7 +63,7 @@ __FBSDID("$FreeBSD: head/sys/netinet/sctp_pcb.c 356377 2020-01-05 14:06:40Z tuex #include #endif #endif -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) #include #include #include @@ -71,21 +71,15 @@ __FBSDID("$FreeBSD: head/sys/netinet/sctp_pcb.c 356377 2020-01-05 14:06:40Z tuex #if defined(__Userspace__) #include #include -#if !defined(__Userspace_os_Windows) +#if !defined(_WIN32) #include #endif #endif -#if defined(__APPLE__) -#define APPLE_FILE_NO 4 -#endif - -#if defined(__FreeBSD__) && __FreeBSD_version >= 801000 -VNET_DEFINE(struct sctp_base_info, system_base_info); -#else +#if !defined(__FreeBSD__) || defined(__Userspace__) struct sctp_base_info system_base_info; -#endif +#endif /* FIX: we don't handle multiple link local scopes */ /* "scopeless" replacement IN6_ARE_ADDR_EQUAL */ #ifdef INET6 @@ -93,7 +87,7 @@ int SCTP6_ARE_ADDR_EQUAL(struct sockaddr_in6 *a, struct sockaddr_in6 *b) { #ifdef SCTP_EMBEDDED_V6_SCOPE -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) struct in6_addr tmp_a, tmp_b; tmp_a = a->sin6_addr; @@ -256,7 +250,6 @@ sctp_allocate_vrf(int vrf_id) return (vrf); } - struct sctp_ifn * sctp_find_ifn(void *ifn, uint32_t ifn_index) { @@ -266,6 +259,7 @@ sctp_find_ifn(void *ifn, uint32_t ifn_index) /* We assume the lock is held for the addresses * 
if that's wrong problems could occur :-) */ + SCTP_IPI_ADDR_LOCK_ASSERT(); hash_ifn_head = &SCTP_BASE_INFO(vrf_ifn_hash)[(ifn_index & SCTP_BASE_INFO(vrf_ifn_hashmark))]; LIST_FOREACH(sctp_ifnp, hash_ifn_head, next_bucket) { if (sctp_ifnp->ifn_index == ifn_index) { @@ -278,7 +272,6 @@ sctp_find_ifn(void *ifn, uint32_t ifn_index) return (NULL); } - struct sctp_vrf * sctp_find_vrf(uint32_t vrf_id) { @@ -294,7 +287,6 @@ sctp_find_vrf(uint32_t vrf_id) return (NULL); } - void sctp_free_vrf(struct sctp_vrf *vrf) { @@ -310,7 +302,6 @@ sctp_free_vrf(struct sctp_vrf *vrf) } } - void sctp_free_ifn(struct sctp_ifn *sctp_ifnp) { @@ -324,7 +315,6 @@ sctp_free_ifn(struct sctp_ifn *sctp_ifnp) } } - void sctp_update_ifn_mtu(uint32_t ifn_index, uint32_t mtu) { @@ -336,7 +326,6 @@ sctp_update_ifn_mtu(uint32_t ifn_index, uint32_t mtu) } } - void sctp_free_ifa(struct sctp_ifa *sctp_ifap) { @@ -350,7 +339,6 @@ sctp_free_ifa(struct sctp_ifa *sctp_ifap) } } - static void sctp_delete_ifn(struct sctp_ifn *sctp_ifnp, int hold_addr_lock) { @@ -361,19 +349,20 @@ sctp_delete_ifn(struct sctp_ifn *sctp_ifnp, int hold_addr_lock) /* Not in the list.. sorry */ return; } - if (hold_addr_lock == 0) + if (hold_addr_lock == 0) { SCTP_IPI_ADDR_WLOCK(); + } else { + SCTP_IPI_ADDR_WLOCK_ASSERT(); + } LIST_REMOVE(sctp_ifnp, next_bucket); LIST_REMOVE(sctp_ifnp, next_ifn); - SCTP_DEREGISTER_INTERFACE(sctp_ifnp->ifn_index, - sctp_ifnp->registered_af); - if (hold_addr_lock == 0) + if (hold_addr_lock == 0) { SCTP_IPI_ADDR_WUNLOCK(); + } /* Take away the reference, and possibly free it */ sctp_free_ifn(sctp_ifnp); } - void sctp_mark_ifa_addr_down(uint32_t vrf_id, struct sockaddr *addr, const char *if_name, uint32_t ifn_index) @@ -386,7 +375,6 @@ sctp_mark_ifa_addr_down(uint32_t vrf_id, struct sockaddr *addr, if (vrf == NULL) { SCTPDBG(SCTP_DEBUG_PCB4, "Can't find vrf_id 0x%x\n", vrf_id); goto out; - } sctp_ifap = sctp_find_ifa_by_addr(addr, vrf->vrf_id, SCTP_ADDR_LOCKED); if (sctp_ifap == NULL) { @@ -417,7 +405,6 @@ sctp_mark_ifa_addr_down(uint32_t vrf_id, struct sockaddr *addr, SCTP_IPI_ADDR_RUNLOCK(); } - void sctp_mark_ifa_addr_up(uint32_t vrf_id, struct sockaddr *addr, const char *if_name, uint32_t ifn_index) @@ -430,7 +417,6 @@ sctp_mark_ifa_addr_up(uint32_t vrf_id, struct sockaddr *addr, if (vrf == NULL) { SCTPDBG(SCTP_DEBUG_PCB4, "Can't find vrf_id 0x%x\n", vrf_id); goto out; - } sctp_ifap = sctp_find_ifa_by_addr(addr, vrf->vrf_id, SCTP_ADDR_LOCKED); if (sctp_ifap == NULL) { @@ -461,7 +447,6 @@ sctp_mark_ifa_addr_up(uint32_t vrf_id, struct sockaddr *addr, SCTP_IPI_ADDR_RUNLOCK(); } - /*- * Add an ifa to an ifn. * Register the interface as necessary. @@ -494,12 +479,10 @@ sctp_add_ifa_to_ifn(struct sctp_ifn *sctp_ifnp, struct sctp_ifa *sctp_ifap) } if (sctp_ifnp->ifa_count == 1) { /* register the new interface */ - SCTP_REGISTER_INTERFACE(sctp_ifnp->ifn_index, ifa_af); sctp_ifnp->registered_af = ifa_af; } } - /*- * Remove an ifa from its ifn. * If no more addresses exist, remove the ifn too. 
Otherwise, re-register @@ -535,13 +518,9 @@ sctp_remove_ifa_from_ifn(struct sctp_ifa *sctp_ifap) /* re-register address family type, if needed */ if ((sctp_ifap->ifn_p->num_v6 == 0) && (sctp_ifap->ifn_p->registered_af == AF_INET6)) { - SCTP_DEREGISTER_INTERFACE(sctp_ifap->ifn_p->ifn_index, AF_INET6); - SCTP_REGISTER_INTERFACE(sctp_ifap->ifn_p->ifn_index, AF_INET); sctp_ifap->ifn_p->registered_af = AF_INET; } else if ((sctp_ifap->ifn_p->num_v4 == 0) && (sctp_ifap->ifn_p->registered_af == AF_INET)) { - SCTP_DEREGISTER_INTERFACE(sctp_ifap->ifn_p->ifn_index, AF_INET); - SCTP_REGISTER_INTERFACE(sctp_ifap->ifn_p->ifn_index, AF_INET6); sctp_ifap->ifn_p->registered_af = AF_INET6; } /* free the ifn refcount */ @@ -551,7 +530,6 @@ sctp_remove_ifa_from_ifn(struct sctp_ifa *sctp_ifap) } } - struct sctp_ifa * sctp_add_addr_to_vrf(uint32_t vrf_id, void *ifn, uint32_t ifn_index, uint32_t ifn_type, const char *if_name, void *ifa, @@ -559,8 +537,8 @@ sctp_add_addr_to_vrf(uint32_t vrf_id, void *ifn, uint32_t ifn_index, int dynamic_add) { struct sctp_vrf *vrf; - struct sctp_ifn *sctp_ifnp = NULL; - struct sctp_ifa *sctp_ifap = NULL; + struct sctp_ifn *sctp_ifnp, *new_sctp_ifnp; + struct sctp_ifa *sctp_ifap, *new_sctp_ifap; struct sctp_ifalist *hash_addr_head; struct sctp_ifnlist *hash_ifn_head; uint32_t hash_of_addr; @@ -570,6 +548,23 @@ sctp_add_addr_to_vrf(uint32_t vrf_id, void *ifn, uint32_t ifn_index, SCTPDBG(SCTP_DEBUG_PCB4, "vrf_id 0x%x: adding address: ", vrf_id); SCTPDBG_ADDR(SCTP_DEBUG_PCB4, addr); #endif + SCTP_MALLOC(new_sctp_ifnp, struct sctp_ifn *, + sizeof(struct sctp_ifn), SCTP_M_IFN); + if (new_sctp_ifnp == NULL) { +#ifdef INVARIANTS + panic("No memory for IFN"); +#endif + return (NULL); + } + SCTP_MALLOC(new_sctp_ifap, struct sctp_ifa *, sizeof(struct sctp_ifa), SCTP_M_IFA); + if (new_sctp_ifap == NULL) { +#ifdef INVARIANTS + panic("No memory for IFA"); +#endif + SCTP_FREE(new_sctp_ifnp, SCTP_M_IFN); + return (NULL); + } + SCTP_IPI_ADDR_WLOCK(); sctp_ifnp = sctp_find_ifn(ifn, ifn_index); if (sctp_ifnp) { @@ -580,6 +575,8 @@ sctp_add_addr_to_vrf(uint32_t vrf_id, void *ifn, uint32_t ifn_index, vrf = sctp_allocate_vrf(vrf_id); if (vrf == NULL) { SCTP_IPI_ADDR_WUNLOCK(); + SCTP_FREE(new_sctp_ifnp, SCTP_M_IFN); + SCTP_FREE(new_sctp_ifap, SCTP_M_IFA); return (NULL); } } @@ -588,15 +585,8 @@ sctp_add_addr_to_vrf(uint32_t vrf_id, void *ifn, uint32_t ifn_index, /* build one and add it, can't hold lock * until after malloc done though. 
*/ - SCTP_IPI_ADDR_WUNLOCK(); - SCTP_MALLOC(sctp_ifnp, struct sctp_ifn *, - sizeof(struct sctp_ifn), SCTP_M_IFN); - if (sctp_ifnp == NULL) { -#ifdef INVARIANTS - panic("No memory for IFN"); -#endif - return (NULL); - } + sctp_ifnp = new_sctp_ifnp; + new_sctp_ifnp = NULL; memset(sctp_ifnp, 0, sizeof(struct sctp_ifn)); sctp_ifnp->ifn_index = ifn_index; sctp_ifnp->ifn_p = ifn; @@ -606,13 +596,12 @@ sctp_add_addr_to_vrf(uint32_t vrf_id, void *ifn, uint32_t ifn_index, atomic_add_int(&vrf->refcount, 1); sctp_ifnp->ifn_mtu = SCTP_GATHER_MTU_FROM_IFN_INFO(ifn, ifn_index, addr->sa_family); if (if_name != NULL) { - snprintf(sctp_ifnp->ifn_name, SCTP_IFNAMSIZ, "%s", if_name); + SCTP_SNPRINTF(sctp_ifnp->ifn_name, SCTP_IFNAMSIZ, "%s", if_name); } else { - snprintf(sctp_ifnp->ifn_name, SCTP_IFNAMSIZ, "%s", "unknown"); + SCTP_SNPRINTF(sctp_ifnp->ifn_name, SCTP_IFNAMSIZ, "%s", "unknown"); } hash_ifn_head = &SCTP_BASE_INFO(vrf_ifn_hash)[(ifn_index & SCTP_BASE_INFO(vrf_ifn_hashmark))]; LIST_INIT(&sctp_ifnp->ifalist); - SCTP_IPI_ADDR_WLOCK(); LIST_INSERT_HEAD(hash_ifn_head, sctp_ifnp, next_bucket); LIST_INSERT_HEAD(&vrf->ifnlist, sctp_ifnp, next_ifn); atomic_add_int(&SCTP_BASE_INFO(ipi_count_ifns), 1); @@ -639,6 +628,10 @@ sctp_add_addr_to_vrf(uint32_t vrf_id, void *ifn, uint32_t ifn_index, } exit_stage_left: SCTP_IPI_ADDR_WUNLOCK(); + if (new_sctp_ifnp != NULL) { + SCTP_FREE(new_sctp_ifnp, SCTP_M_IFN); + } + SCTP_FREE(new_sctp_ifap, SCTP_M_IFA); return (sctp_ifap); } else { if (sctp_ifap->ifn_p) { @@ -665,14 +658,7 @@ sctp_add_addr_to_vrf(uint32_t vrf_id, void *ifn, uint32_t ifn_index, goto exit_stage_left; } } - SCTP_IPI_ADDR_WUNLOCK(); - SCTP_MALLOC(sctp_ifap, struct sctp_ifa *, sizeof(struct sctp_ifa), SCTP_M_IFA); - if (sctp_ifap == NULL) { -#ifdef INVARIANTS - panic("No memory for IFA"); -#endif - return (NULL); - } + sctp_ifap = new_sctp_ifap; memset(sctp_ifap, 0, sizeof(struct sctp_ifa)); sctp_ifap->ifn_p = sctp_ifnp; atomic_add_int(&sctp_ifnp->refcount, 1); @@ -761,7 +747,6 @@ sctp_add_addr_to_vrf(uint32_t vrf_id, void *ifn, uint32_t ifn_index, (sctp_ifap->src_is_loop == 0)) { sctp_ifap->src_is_glob = 1; } - SCTP_IPI_ADDR_WLOCK(); hash_addr_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)]; LIST_INSERT_HEAD(hash_addr_head, sctp_ifap, next_bucket); sctp_ifap->refcount = 1; @@ -770,10 +755,13 @@ sctp_add_addr_to_vrf(uint32_t vrf_id, void *ifn, uint32_t ifn_index, vrf->total_ifa_count++; atomic_add_int(&SCTP_BASE_INFO(ipi_count_ifas), 1); if (new_ifn_af) { - SCTP_REGISTER_INTERFACE(ifn_index, new_ifn_af); sctp_ifnp->registered_af = new_ifn_af; } SCTP_IPI_ADDR_WUNLOCK(); + if (new_sctp_ifnp != NULL) { + SCTP_FREE(new_sctp_ifnp, SCTP_M_IFN); + } + if (dynamic_add) { /* Bump up the refcount so that when the timer * completes it will drop back down. @@ -838,8 +826,7 @@ sctp_del_addr_from_vrf(uint32_t vrf_id, struct sockaddr *addr, int valid = 0; /*- * The name has priority over the ifn_index - * if its given. We do this especially for - * panda who might recycle indexes fast. + * if its given. */ if (if_name) { if (strncmp(if_name, sctp_ifap->ifn_p->ifn_name, SCTP_IFNAMSIZ) == 0) { @@ -864,7 +851,7 @@ sctp_del_addr_from_vrf(uint32_t vrf_id, struct sockaddr *addr, } SCTPDBG(SCTP_DEBUG_PCB4, "Deleting ifa %p\n", (void *)sctp_ifap); sctp_ifap->localifa_flags &= SCTP_ADDR_VALID; - /* + /* * We don't set the flag. This means that the structure will * hang around in EP's that have bound specific to it until * they close. 
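/*
 * The sctp_add_addr_to_vrf() hunks above switch to "allocate first, lock
 * second": both candidate objects are allocated before SCTP_IPI_ADDR_WLOCK()
 * is taken, and whichever candidate turns out not to be needed is freed after
 * the lock is dropped, so the write lock is never released around a malloc.
 * A simplified, self-contained sketch of that idea (names are illustrative):
 */
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t my_table_lock = PTHREAD_MUTEX_INITIALIZER;
static void *my_existing_entry;         /* stand-in for the hash lookup result */

static void *
my_add_entry(size_t size)
{
	void *fresh, *used;

	fresh = malloc(size);           /* allocate outside the lock */
	if (fresh == NULL) {
		return (NULL);
	}
	pthread_mutex_lock(&my_table_lock);
	if (my_existing_entry != NULL) {
		used = my_existing_entry;   /* already present, keep it */
	} else {
		my_existing_entry = fresh;  /* publish the preallocated object */
		used = fresh;
		fresh = NULL;
	}
	pthread_mutex_unlock(&my_table_lock);
	if (fresh != NULL) {
		free(fresh);                /* preallocation was not needed */
	}
	return (used);
}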
This gives us TCP like behavior if someone @@ -920,7 +907,6 @@ sctp_del_addr_from_vrf(uint32_t vrf_id, struct sockaddr *addr, return; } - static int sctp_does_stcb_own_this_addr(struct sctp_tcb *stcb, struct sockaddr *to) { @@ -990,7 +976,7 @@ sctp_does_stcb_own_this_addr(struct sctp_tcb *stcb, struct sockaddr *to) IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) { continue; } -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred, &sin->sin_addr) != 0) { continue; @@ -1012,7 +998,7 @@ sctp_does_stcb_own_this_addr(struct sctp_tcb *stcb, struct sockaddr *to) #endif sin6 = &sctp_ifa->address.sin6; rsin6 = (struct sockaddr_in6 *)to; -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred, &sin6->sin6_addr) != 0) { continue; @@ -1135,14 +1121,12 @@ sctp_does_stcb_own_this_addr(struct sctp_tcb *stcb, struct sockaddr *to) /* TSNH */ break; } - } } SCTP_IPI_ADDR_RUNLOCK(); return (0); } - static struct sctp_tcb * sctp_tcb_special_locate(struct sctp_inpcb **inp_p, struct sockaddr *from, struct sockaddr *to, struct sctp_nets **netp, uint32_t vrf_id) @@ -1217,7 +1201,7 @@ sctp_tcb_special_locate(struct sctp_inpcb **inp_p, struct sockaddr *from, SCTP_INP_RUNLOCK(inp); continue; } -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) switch (to->sa_family) { #ifdef INET case AF_INET: @@ -1276,7 +1260,6 @@ sctp_tcb_special_locate(struct sctp_inpcb **inp_p, struct sockaddr *from, int match = 0; LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { - if (laddr->ifa == NULL) { SCTPDBG(SCTP_DEBUG_PCB1, "%s: NULL ifa\n", __func__); continue; @@ -1372,7 +1355,6 @@ sctp_tcb_special_locate(struct sctp_inpcb **inp_p, struct sockaddr *from, } /* Does this TCB have a matching address? */ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { - if (net->ro._l_addr.sa.sa_family != from->sa_family) { /* not the same family, can't be a match */ continue; @@ -1451,7 +1433,6 @@ sctp_tcb_special_locate(struct sctp_inpcb **inp_p, struct sockaddr *from, return (NULL); } - /* * rules for use * @@ -1818,7 +1799,6 @@ null_return: return (NULL); } - /* * Find an association for a specific endpoint using the association id given * out in the COMM_UP notification @@ -1833,10 +1813,6 @@ sctp_findasoc_ep_asocid_locked(struct sctp_inpcb *inp, sctp_assoc_t asoc_id, int struct sctp_tcb *stcb; uint32_t id; - if (inp == NULL) { - SCTP_PRINTF("TSNH ep_associd\n"); - return (NULL); - } if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) { SCTP_PRINTF("TSNH ep_associd0\n"); return (NULL); @@ -1870,7 +1846,6 @@ sctp_findasoc_ep_asocid_locked(struct sctp_inpcb *inp, sctp_assoc_t asoc_id, int return (NULL); } - struct sctp_tcb * sctp_findassociation_ep_asocid(struct sctp_inpcb *inp, sctp_assoc_t asoc_id, int want_lock) { @@ -1882,7 +1857,6 @@ sctp_findassociation_ep_asocid(struct sctp_inpcb *inp, sctp_assoc_t asoc_id, int return (stcb); } - /* * Endpoint probe expects that the INP_INFO is locked. 
*/ @@ -1958,7 +1932,7 @@ sctp_endpoint_probe(struct sockaddr *nam, struct sctppcbhead *head, SCTP_INP_RUNLOCK(inp); continue; } -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) if (prison_check_ip4(inp->ip_inp.inp.inp_cred, &sin->sin_addr) != 0) { SCTP_INP_RUNLOCK(inp); @@ -1974,7 +1948,7 @@ sctp_endpoint_probe(struct sockaddr *nam, struct sctppcbhead *head, SCTP_INP_RUNLOCK(inp); continue; } -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) if (prison_check_ip6(inp->ip_inp.inp.inp_cred, &sin6->sin6_addr) != 0) { SCTP_INP_RUNLOCK(inp); @@ -2091,7 +2065,7 @@ sctp_endpoint_probe(struct sockaddr *nam, struct sctppcbhead *head, switch (nam->sa_family) { #ifdef INET case AF_INET: -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) if (sin == NULL) { /* TSNH */ break; @@ -2130,7 +2104,6 @@ sctp_endpoint_probe(struct sockaddr *nam, struct sctppcbhead *head, return (NULL); } - static struct sctp_inpcb * sctp_isport_inuse(struct sctp_inpcb *inp, uint16_t lport, uint32_t vrf_id) { @@ -2191,7 +2164,6 @@ sctp_isport_inuse(struct sctp_inpcb *inp, uint16_t lport, uint32_t vrf_id) return (NULL); } - int sctp_swap_inpcb_for_listen(struct sctp_inpcb *inp) { @@ -2199,6 +2171,9 @@ sctp_swap_inpcb_for_listen(struct sctp_inpcb *inp) struct sctppcbhead *head; struct sctp_inpcb *tinp, *ninp; + SCTP_INP_INFO_WLOCK_ASSERT(); + SCTP_INP_WLOCK_ASSERT(inp); + if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE)) { /* only works with port reuse on */ return (-1); @@ -2206,8 +2181,7 @@ sctp_swap_inpcb_for_listen(struct sctp_inpcb *inp) if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) { return (0); } - SCTP_INP_RUNLOCK(inp); - SCTP_INP_INFO_WLOCK(); + SCTP_INP_WUNLOCK(inp); head = &SCTP_BASE_INFO(sctp_ephash)[SCTP_PCBHASH_ALLADDR(inp->sctp_lport, SCTP_BASE_INFO(hashmark))]; /* Kick out all non-listeners to the TCP hash */ @@ -2237,13 +2211,9 @@ sctp_swap_inpcb_for_listen(struct sctp_inpcb *inp) inp->sctp_flags &= ~SCTP_PCB_FLAGS_IN_TCPPOOL; head = &SCTP_BASE_INFO(sctp_ephash)[SCTP_PCBHASH_ALLADDR(inp->sctp_lport, SCTP_BASE_INFO(hashmark))]; LIST_INSERT_HEAD(head, inp, sctp_hash); - SCTP_INP_WUNLOCK(inp); - SCTP_INP_RLOCK(inp); - SCTP_INP_INFO_WUNLOCK(); return (0); } - struct sctp_inpcb * sctp_pcb_findep(struct sockaddr *nam, int find_tcp_pool, int have_lock, uint32_t vrf_id) @@ -2329,7 +2299,6 @@ sctp_pcb_findep(struct sockaddr *nam, int find_tcp_pool, int have_lock, return (inp); } - /* * Find an association for an endpoint with the pointer to whom you want to * send to and the endpoint pointer. The address can be IPv4 or IPv6. We may @@ -2381,7 +2350,6 @@ sctp_findassociation_addr_sa(struct sockaddr *from, struct sockaddr *to, return (stcb); } - /* * This routine will grub through the mbuf that is a INIT or INIT-ACK and * find all addresses that the sender has specified in any address list. Each @@ -2590,7 +2558,6 @@ sctp_findassoc_by_vtag(struct sockaddr *from, struct sockaddr *to, uint32_t vtag return (NULL); } - /* * Find an association with the pointer to the inbound IP packet. This can be * a IPv4 or IPv6 packet. @@ -2760,7 +2727,6 @@ sctp_findassociation_ep_asconf(struct mbuf *m, int offset, return (stcb); } - /* * allocate a sctp_inpcb and setup a temporary binding to a port/all * addresses. 
This way if we don't get a bind we by default pick a ephemeral @@ -2797,17 +2763,17 @@ sctp_inpcb_alloc(struct socket *so, uint32_t vrf_id) memset(inp, 0, sizeof(*inp)); /* bump generations */ -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) inp->ip_inp.inp.inp_state = INPCB_STATE_INUSE; #endif /* setup socket pointers */ inp->sctp_socket = so; inp->ip_inp.inp.inp_socket = so; -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) inp->ip_inp.inp.inp_cred = crhold(so->so_cred); #endif #ifdef INET6 -#if !defined(__Userspace__) && !defined(__Windows__) +#if !defined(__Userspace__) && !defined(_WIN32) if (INP_SOCKAF(so) == AF_INET6) { if (MODULE_GLOBAL(ip6_auto_flowlabel)) { inp->ip_inp.inp.inp_flags |= IN6P_AUTOFLOWLABEL; @@ -2832,7 +2798,7 @@ sctp_inpcb_alloc(struct socket *so, uint32_t vrf_id) inp->pktdrop_supported = (uint8_t)SCTP_BASE_SYSCTL(sctp_pktdrop_enable); inp->idata_supported = 0; -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) inp->fibnum = so->so_fibnum; #else inp->fibnum = 0; @@ -2846,7 +2812,7 @@ sctp_inpcb_alloc(struct socket *so, uint32_t vrf_id) /* init the small hash table we use to track asocid <-> tcb */ inp->sctp_asocidhash = SCTP_HASH_INIT(SCTP_STACK_VTAG_HASH_SIZE, &inp->hashasocidmark); if (inp->sctp_asocidhash == NULL) { -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) crfree(inp->ip_inp.inp.inp_cred); #endif SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_ep), inp); @@ -2859,12 +2825,7 @@ sctp_inpcb_alloc(struct socket *so, uint32_t vrf_id) so->so_pcb = (caddr_t)inp; -#if defined(__FreeBSD__) && __FreeBSD_version < 803000 - if ((SCTP_SO_TYPE(so) == SOCK_DGRAM) || - (SCTP_SO_TYPE(so) == SOCK_SEQPACKET)) { -#else if (SCTP_SO_TYPE(so) == SOCK_SEQPACKET) { -#endif /* UDP style socket */ inp->sctp_flags = (SCTP_PCB_FLAGS_UDPTYPE | SCTP_PCB_FLAGS_UNBOUND); @@ -2878,14 +2839,6 @@ sctp_inpcb_alloc(struct socket *so, uint32_t vrf_id) SOCK_LOCK(so); SCTP_CLEAR_SO_NBIO(so); SOCK_UNLOCK(so); -#if defined(__Panda__) - } else if (SCTP_SO_TYPE(so) == SOCK_FASTSEQPACKET) { - inp->sctp_flags = (SCTP_PCB_FLAGS_UDPTYPE | - SCTP_PCB_FLAGS_UNBOUND); - } else if (SCTP_SO_TYPE(so) == SOCK_FASTSTREAM) { - inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE | - SCTP_PCB_FLAGS_UNBOUND); -#endif } else { /* * unsupported socket type (RAW, etc)- in case we missed it @@ -2893,7 +2846,7 @@ sctp_inpcb_alloc(struct socket *so, uint32_t vrf_id) */ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EOPNOTSUPP); so->so_pcb = NULL; -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) crfree(inp->ip_inp.inp.inp_cred); #endif SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_ep), inp); @@ -2915,7 +2868,7 @@ sctp_inpcb_alloc(struct socket *so, uint32_t vrf_id) SCTP_PRINTF("Out of SCTP-INPCB->hashinit - no resources\n"); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOBUFS); so->so_pcb = NULL; -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) crfree(inp->ip_inp.inp.inp_cred); #endif SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_ep), inp); @@ -2929,7 +2882,7 @@ sctp_inpcb_alloc(struct socket *so, uint32_t vrf_id) SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOBUFS); so->so_pcb = NULL; SCTP_HASH_FREE(inp->sctp_tcbhash, inp->sctp_hashmark); -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) crfree(inp->ip_inp.inp.inp_cred); #endif SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_ep), inp); @@ -2940,7 +2893,7 @@ 
sctp_inpcb_alloc(struct socket *so, uint32_t vrf_id) #endif inp->def_vrf_id = vrf_id; -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD) inp->ip_inp.inp.inpcb_mtx = lck_mtx_alloc_init(SCTP_BASE_INFO(sctbinfo).mtx_grp, SCTP_BASE_INFO(sctbinfo).mtx_attr); if (inp->ip_inp.inp.inpcb_mtx == NULL) { @@ -2963,7 +2916,7 @@ sctp_inpcb_alloc(struct socket *so, uint32_t vrf_id) #endif SCTP_INP_INFO_WLOCK(); SCTP_INP_LOCK_INIT(inp); -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) INP_LOCK_INIT(&inp->ip_inp.inp, "inp", "sctpinp"); #endif SCTP_INP_READ_INIT(inp); @@ -2973,7 +2926,7 @@ sctp_inpcb_alloc(struct socket *so, uint32_t vrf_id) /* add it to the info area */ LIST_INSERT_HEAD(&SCTP_BASE_INFO(listhead), inp, sctp_list); -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) inp->ip_inp.inp.inp_pcbinfo = &SCTP_BASE_INFO(sctbinfo); #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD) || defined(APPLE_LION) || defined(APPLE_MOUNTAINLION) LIST_INSERT_HEAD(SCTP_BASE_INFO(sctbinfo).listhead, &inp->ip_inp.inp, inp_list); @@ -3000,13 +2953,13 @@ sctp_inpcb_alloc(struct socket *so, uint32_t vrf_id) m = &inp->sctp_ep; /* setup the base timeout information */ - m->sctp_timeoutticks[SCTP_TIMER_SEND] = SEC_TO_TICKS(SCTP_SEND_SEC); /* needed ? */ - m->sctp_timeoutticks[SCTP_TIMER_INIT] = SEC_TO_TICKS(SCTP_INIT_SEC); /* needed ? */ - m->sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(SCTP_BASE_SYSCTL(sctp_delayed_sack_time_default)); - m->sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = MSEC_TO_TICKS(SCTP_BASE_SYSCTL(sctp_heartbeat_interval_default)); - m->sctp_timeoutticks[SCTP_TIMER_PMTU] = SEC_TO_TICKS(SCTP_BASE_SYSCTL(sctp_pmtu_raise_time_default)); - m->sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] = SEC_TO_TICKS(SCTP_BASE_SYSCTL(sctp_shutdown_guard_time_default)); - m->sctp_timeoutticks[SCTP_TIMER_SIGNATURE] = SEC_TO_TICKS(SCTP_BASE_SYSCTL(sctp_secret_lifetime_default)); + m->sctp_timeoutticks[SCTP_TIMER_SEND] = sctp_secs_to_ticks(SCTP_SEND_SEC); /* needed ? */ + m->sctp_timeoutticks[SCTP_TIMER_INIT] = sctp_secs_to_ticks(SCTP_INIT_SEC); /* needed ? */ + m->sctp_timeoutticks[SCTP_TIMER_RECV] = sctp_msecs_to_ticks(SCTP_BASE_SYSCTL(sctp_delayed_sack_time_default)); + m->sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = sctp_msecs_to_ticks(SCTP_BASE_SYSCTL(sctp_heartbeat_interval_default)); + m->sctp_timeoutticks[SCTP_TIMER_PMTU] = sctp_secs_to_ticks(SCTP_BASE_SYSCTL(sctp_pmtu_raise_time_default)); + m->sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] = sctp_secs_to_ticks(SCTP_BASE_SYSCTL(sctp_shutdown_guard_time_default)); + m->sctp_timeoutticks[SCTP_TIMER_SIGNATURE] = sctp_secs_to_ticks(SCTP_BASE_SYSCTL(sctp_secret_lifetime_default)); /* all max/min max are in ms */ m->sctp_maxrto = SCTP_BASE_SYSCTL(sctp_rto_max_default); m->sctp_minrto = SCTP_BASE_SYSCTL(sctp_rto_min_default); @@ -3046,7 +2999,7 @@ sctp_inpcb_alloc(struct socket *so, uint32_t vrf_id) /* Setup the initial secret */ (void)SCTP_GETTIME_TIMEVAL(&time); - m->time_of_secret_change = time.tv_sec; + m->time_of_secret_change = (unsigned int)time.tv_sec; for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) { m->secret_key[0][i] = sctp_select_initial_TSN(m); @@ -3054,7 +3007,7 @@ sctp_inpcb_alloc(struct socket *so, uint32_t vrf_id) sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL); /* How long is a cookie good for ? 
*/ - m->def_cookie_life = MSEC_TO_TICKS(SCTP_BASE_SYSCTL(sctp_valid_cookie_life_default)); + m->def_cookie_life = sctp_msecs_to_ticks(SCTP_BASE_SYSCTL(sctp_valid_cookie_life_default)); /* * Initialize authentication parameters */ @@ -3080,7 +3033,6 @@ sctp_inpcb_alloc(struct socket *so, uint32_t vrf_id) return (error); } - void sctp_move_pcb_and_assoc(struct sctp_inpcb *old_inp, struct sctp_inpcb *new_inp, struct sctp_tcb *stcb) @@ -3178,24 +3130,54 @@ sctp_move_pcb_and_assoc(struct sctp_inpcb *old_inp, struct sctp_inpcb *new_inp, } } } - /* Now any running timers need to be adjusted - * since we really don't care if they are running - * or not just blast in the new_inp into all of - * them. - */ - - stcb->asoc.dack_timer.ep = (void *)new_inp; - stcb->asoc.asconf_timer.ep = (void *)new_inp; - stcb->asoc.strreset_timer.ep = (void *)new_inp; - stcb->asoc.shut_guard_timer.ep = (void *)new_inp; - stcb->asoc.autoclose_timer.ep = (void *)new_inp; - stcb->asoc.delayed_event_timer.ep = (void *)new_inp; - stcb->asoc.delete_prim_timer.ep = (void *)new_inp; + /* Now any running timers need to be adjusted. */ + if (stcb->asoc.dack_timer.ep == old_inp) { + SCTP_INP_DECR_REF(old_inp); + stcb->asoc.dack_timer.ep = new_inp; + SCTP_INP_INCR_REF(new_inp); + } + if (stcb->asoc.asconf_timer.ep == old_inp) { + SCTP_INP_DECR_REF(old_inp); + stcb->asoc.asconf_timer.ep = new_inp; + SCTP_INP_INCR_REF(new_inp); + } + if (stcb->asoc.strreset_timer.ep == old_inp) { + SCTP_INP_DECR_REF(old_inp); + stcb->asoc.strreset_timer.ep = new_inp; + SCTP_INP_INCR_REF(new_inp); + } + if (stcb->asoc.shut_guard_timer.ep == old_inp) { + SCTP_INP_DECR_REF(old_inp); + stcb->asoc.shut_guard_timer.ep = new_inp; + SCTP_INP_INCR_REF(new_inp); + } + if (stcb->asoc.autoclose_timer.ep == old_inp) { + SCTP_INP_DECR_REF(old_inp); + stcb->asoc.autoclose_timer.ep = new_inp; + SCTP_INP_INCR_REF(new_inp); + } + if (stcb->asoc.delete_prim_timer.ep == old_inp) { + SCTP_INP_DECR_REF(old_inp); + stcb->asoc.delete_prim_timer.ep = new_inp; + SCTP_INP_INCR_REF(new_inp); + } /* now what about the nets? */ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { - net->pmtu_timer.ep = (void *)new_inp; - net->hb_timer.ep = (void *)new_inp; - net->rxt_timer.ep = (void *)new_inp; + if (net->pmtu_timer.ep == old_inp) { + SCTP_INP_DECR_REF(old_inp); + net->pmtu_timer.ep = new_inp; + SCTP_INP_INCR_REF(new_inp); + } + if (net->hb_timer.ep == old_inp) { + SCTP_INP_DECR_REF(old_inp); + net->hb_timer.ep = new_inp; + SCTP_INP_INCR_REF(new_inp); + } + if (net->rxt_timer.ep == old_inp) { + SCTP_INP_DECR_REF(old_inp); + net->rxt_timer.ep = new_inp; + SCTP_INP_INCR_REF(new_inp); + } } SCTP_INP_WUNLOCK(new_inp); SCTP_INP_WUNLOCK(old_inp); @@ -3240,8 +3222,8 @@ sctp_remove_laddr(struct sctp_laddr *laddr) SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_laddr), laddr); SCTP_DECR_LADDR_COUNT(); } - #if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__Userspace__)) + /* * Don't know why, but without this there is an unknown reference when * compiling NetBSD... hmm @@ -3249,24 +3231,28 @@ sctp_remove_laddr(struct sctp_laddr *laddr) extern void in6_sin6_2_sin(struct sockaddr_in *, struct sockaddr_in6 *sin6); #endif - -/* sctp_ifap is used to bypass normal local address validation checks */ +/* + * Bind the socket, with the PCB and global info locks held. Note, if a + * socket address is specified, the PCB lock may be dropped and re-acquired. + * + * sctp_ifap is used to bypass normal local address validation checks. 
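The timer hand-over in the hunk above no longer blindly rewrites every timer's ep pointer to the new endpoint: a timer is retargeted only if it still points at old_inp, and the endpoint reference it holds is moved along with the pointer, keeping both refcounts balanced. A minimal sketch of that guarded hand-over, using stand-in types rather than the real sctp_timer/sctp_inpcb structures:

#include <stdatomic.h>

/* Stand-ins for the real inpcb and timer structures. */
struct endpoint {
    atomic_int refcount;
};

struct ep_timer {
    struct endpoint *ep;    /* endpoint the timer callback will dereference */
};

/*
 * Retarget a timer from old_ep to new_ep only if it still references
 * old_ep, transferring the reference it holds as the pointer moves.
 */
static void
timer_move_endpoint(struct ep_timer *t, struct endpoint *old_ep,
    struct endpoint *new_ep)
{
    if (t->ep == old_ep) {
        atomic_fetch_sub(&old_ep->refcount, 1);
        t->ep = new_ep;
        atomic_fetch_add(&new_ep->refcount, 1);
    }
}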
+ */ int -#if defined(__FreeBSD__) && __FreeBSD_version >= 500000 -sctp_inpcb_bind(struct socket *so, struct sockaddr *addr, - struct sctp_ifa *sctp_ifap, struct thread *p) -#elif defined(__Windows__) -sctp_inpcb_bind(struct socket *so, struct sockaddr *addr, - struct sctp_ifa *sctp_ifap, PKTHREAD p) +#if defined(__FreeBSD__) && !defined(__Userspace__) +sctp_inpcb_bind_locked(struct sctp_inpcb *inp, struct sockaddr *addr, + struct sctp_ifa *sctp_ifap, struct thread *td) +#elif defined(_WIN32) && !defined(__Userspace__) +sctp_inpcb_bind_locked(struct sctp_inpcb *inp, struct sockaddr *addr, + struct sctp_ifa *sctp_ifap, PKTHREAD p) #else -sctp_inpcb_bind(struct socket *so, struct sockaddr *addr, - struct sctp_ifa *sctp_ifap, struct proc *p) +sctp_inpcb_bind_locked(struct sctp_inpcb *inp, struct sockaddr *addr, + struct sctp_ifa *sctp_ifap, struct proc *p) #endif { /* bind a ep to a socket address */ struct sctppcbhead *head; - struct sctp_inpcb *inp, *inp_tmp; -#if defined(__FreeBSD__) || defined(__APPLE__) + struct sctp_inpcb *inp_tmp; +#if (defined(__FreeBSD__) || defined(__APPLE__)) && !defined(__Userspace__) struct inpcb *ip_inp; #endif int port_reuse_active = 0; @@ -3278,12 +3264,20 @@ sctp_inpcb_bind(struct socket *so, struct sockaddr *addr, int error; uint32_t vrf_id; +#if defined(__FreeBSD__) && !defined(__Userspace__) + KASSERT(td != NULL, ("%s: null thread", __func__)); + +#endif + error = 0; lport = 0; bindall = 1; - inp = (struct sctp_inpcb *)so->so_pcb; -#if defined(__FreeBSD__) || defined(__APPLE__) - ip_inp = (struct inpcb *)so->so_pcb; +#if (defined(__FreeBSD__) || defined(__APPLE__)) && !defined(__Userspace__) + ip_inp = &inp->ip_inp.inp; #endif + + SCTP_INP_INFO_WLOCK_ASSERT(); + SCTP_INP_WLOCK_ASSERT(inp); + #ifdef SCTP_DEBUG if (addr) { SCTPDBG(SCTP_DEBUG_PCB1, "Bind called port: %d\n", @@ -3293,16 +3287,11 @@ sctp_inpcb_bind(struct socket *so, struct sockaddr *addr, } #endif if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == 0) { + error = EINVAL; /* already did a bind, subsequent binds NOT allowed ! */ - SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); - return (EINVAL); + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error); + goto out; } -#if defined(__FreeBSD__) && __FreeBSD_version >= 500000 -#ifdef INVARIANTS - if (p == NULL) - panic("null proc/thread"); -#endif -#endif if (addr != NULL) { switch (addr->sa_family) { #ifdef INET @@ -3312,26 +3301,28 @@ sctp_inpcb_bind(struct socket *so, struct sockaddr *addr, /* IPV6_V6ONLY socket? */ if (SCTP_IPV6_V6ONLY(inp)) { - SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); - return (EINVAL); + error = EINVAL; + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error); + goto out; } #ifdef HAVE_SA_LEN if (addr->sa_len != sizeof(*sin)) { - SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); - return (EINVAL); + error = EINVAL; + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error); + goto out; } #endif sin = (struct sockaddr_in *)addr; lport = sin->sin_port; -#if defined(__FreeBSD__) && __FreeBSD_version >= 800000 +#if defined(__FreeBSD__) && !defined(__Userspace__) /* * For LOOPBACK the prison_local_ip4() call will transmute the ip address * to the proper value. 
*/ - if (p && (error = prison_local_ip4(p->td_ucred, &sin->sin_addr)) != 0) { + if ((error = prison_local_ip4(td->td_ucred, &sin->sin_addr)) != 0) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error); - return (error); + goto out; } #endif if (sin->sin_addr.s_addr != INADDR_ANY) { @@ -3347,23 +3338,23 @@ sctp_inpcb_bind(struct socket *so, struct sockaddr *addr, struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)addr; - #ifdef HAVE_SA_LEN if (addr->sa_len != sizeof(*sin6)) { - SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); - return (EINVAL); + error = EINVAL; + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error); + goto out; } #endif lport = sin6->sin6_port; -#if defined(__FreeBSD__) && __FreeBSD_version >= 800000 +#if defined(__FreeBSD__) && !defined(__Userspace__) /* * For LOOPBACK the prison_local_ip6() call will transmute the ipv6 address * to the proper value. */ - if (p && (error = prison_local_ip6(p->td_ucred, &sin6->sin6_addr, + if ((error = prison_local_ip6(td->td_ucred, &sin6->sin6_addr, (SCTP_IPV6_V6ONLY(inp) != 0))) != 0) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error); - return (error); + goto out; } #endif if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { @@ -3372,28 +3363,32 @@ sctp_inpcb_bind(struct socket *so, struct sockaddr *addr, /* KAME hack: embed scopeid */ #if defined(SCTP_KAME) if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) { - SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); - return (EINVAL); + error = EINVAL; + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error); + goto out; } -#elif defined(__APPLE__) +#elif defined(__APPLE__) && !defined(__Userspace__) #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD) if (in6_embedscope(&sin6->sin6_addr, sin6, ip_inp, NULL) != 0) { #else if (in6_embedscope(&sin6->sin6_addr, sin6, ip_inp, NULL, NULL) != 0) { #endif - SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); - return (EINVAL); + error = EINVAL; + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error); + goto out; } -#elif defined(__FreeBSD__) +#elif defined(__FreeBSD__) && !defined(__Userspace__) error = scope6_check_id(sin6, MODULE_GLOBAL(ip6_use_defzone)); if (error != 0) { + error = EINVAL; SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error); - return (error); + goto out; } #else if (in6_embedscope(&sin6->sin6_addr, sin6) != 0) { - SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); - return (EINVAL); + error = EINVAL; + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error); + goto out; } #endif #endif /* SCTP_EMBEDDED_V6_SCOPE */ @@ -3412,8 +3407,9 @@ sctp_inpcb_bind(struct socket *so, struct sockaddr *addr, #ifdef HAVE_SA_LEN if (addr->sa_len != sizeof(struct sockaddr_conn)) { - SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); - return (EINVAL); + error = EINVAL; + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error); + goto out; } #endif sconn = (struct sockaddr_conn *)addr; @@ -3425,58 +3421,36 @@ sctp_inpcb_bind(struct socket *so, struct sockaddr *addr, } #endif default: - SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EAFNOSUPPORT); - return (EAFNOSUPPORT); + error = EAFNOSUPPORT; + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error); + goto out; } } - SCTP_INP_INFO_WLOCK(); - SCTP_INP_WLOCK(inp); /* Setup a vrf_id to be the default for the non-bind-all case. 
*/ vrf_id = inp->def_vrf_id; - /* increase our count due to the unlock we do */ - SCTP_INP_INCR_REF(inp); if (lport) { /* * Did the caller specify a port? if so we must see if an ep * already has this one bound. */ /* got to be root to get at low ports */ -#if !defined(__Windows__) - if (ntohs(lport) < IPPORT_RESERVED) { - if ((p != NULL) && ((error = -#ifdef __FreeBSD__ -#if __FreeBSD_version > 602000 - priv_check(p, PRIV_NETINET_RESERVEDPORT) -#elif __FreeBSD_version >= 500000 - suser_cred(p->td_ucred, 0) +#if !(defined(_WIN32) && !defined(__Userspace__)) + if (ntohs(lport) < IPPORT_RESERVED && +#if defined(__FreeBSD__) && !defined(__Userspace__) + (error = priv_check(td, PRIV_NETINET_RESERVEDPORT)) != 0) { +#elif defined(__APPLE__) && !defined(__Userspace__) + (error = suser(p->p_ucred, &p->p_acflag)) != 0) { +#elif defined(__Userspace__) + /* TODO ensure uid is 0, etc... */ + 0) { #else - suser(p) -#endif -#elif defined(__APPLE__) - suser(p->p_ucred, &p->p_acflag) -#elif defined(__Userspace__) /* must be true to use raw socket */ - 1 -#else - suser(p, 0) -#endif - ) != 0)) { - SCTP_INP_DECR_REF(inp); - SCTP_INP_WUNLOCK(inp); - SCTP_INP_INFO_WUNLOCK(); - return (error); - } -#if defined(__Panda__) - if (!SCTP_IS_PRIVILEDGED(so)) { - SCTP_INP_DECR_REF(inp); - SCTP_INP_WUNLOCK(inp); - SCTP_INP_INFO_WUNLOCK(); - SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EACCES); - return (EACCES); - } + (error = suser(p, 0)) != 0) { #endif + goto out; } -#endif /* __Windows__ */ +#endif + SCTP_INP_INCR_REF(inp); SCTP_INP_WUNLOCK(inp); if (bindall) { #ifdef SCTP_MVRF @@ -3503,10 +3477,11 @@ sctp_inpcb_bind(struct socket *so, struct sockaddr *addr, port_reuse_active = 1; goto continue_anyway; } + SCTP_INP_WLOCK(inp); SCTP_INP_DECR_REF(inp); - SCTP_INP_INFO_WUNLOCK(); - SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EADDRINUSE); - return (EADDRINUSE); + error = EADDRINUSE; + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error); + goto out; } #ifdef SCTP_MVRF } @@ -3529,14 +3504,16 @@ sctp_inpcb_bind(struct socket *so, struct sockaddr *addr, port_reuse_active = 1; goto continue_anyway; } + SCTP_INP_WLOCK(inp); SCTP_INP_DECR_REF(inp); - SCTP_INP_INFO_WUNLOCK(); - SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EADDRINUSE); - return (EADDRINUSE); + error = EADDRINUSE; + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error); + goto out; } } continue_anyway: SCTP_INP_WLOCK(inp); + SCTP_INP_DECR_REF(inp); if (bindall) { /* verify that no lport is not used by a singleton */ if ((port_reuse_active == 0) && @@ -3546,61 +3523,42 @@ sctp_inpcb_bind(struct socket *so, struct sockaddr *addr, (sctp_is_feature_on(inp_tmp, SCTP_PCB_FLAGS_PORTREUSE))) { port_reuse_active = 1; } else { - SCTP_INP_DECR_REF(inp); - SCTP_INP_WUNLOCK(inp); - SCTP_INP_INFO_WUNLOCK(); - SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EADDRINUSE); - return (EADDRINUSE); + error = EADDRINUSE; + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error); + goto out; } } } } else { uint16_t first, last, candidate; uint16_t count; - int done; -#if defined(__Windows__) +#if defined(__Userspace__) + first = MODULE_GLOBAL(ipport_firstauto); + last = MODULE_GLOBAL(ipport_lastauto); +#elif defined(_WIN32) first = 1; last = 0xffff; -#else -#if defined(__Userspace__) - /* TODO ensure uid is 0, etc... 
*/ #elif defined(__FreeBSD__) || defined(__APPLE__) if (ip_inp->inp_flags & INP_HIGHPORT) { first = MODULE_GLOBAL(ipport_hifirstauto); last = MODULE_GLOBAL(ipport_hilastauto); } else if (ip_inp->inp_flags & INP_LOWPORT) { - if (p && (error = -#ifdef __FreeBSD__ -#if __FreeBSD_version > 602000 - priv_check(p, PRIV_NETINET_RESERVEDPORT) -#elif __FreeBSD_version >= 500000 - suser_cred(p->td_ucred, 0) +#if defined(__FreeBSD__) + if ((error = priv_check(td, PRIV_NETINET_RESERVEDPORT)) != 0) { #else - suser(p) + if ((error = suser(p->p_ucred, &p->p_acflag)) != 0) { #endif -#elif defined(__APPLE__) - suser(p->p_ucred, &p->p_acflag) -#else - suser(p, 0) -#endif - )) { - SCTP_INP_DECR_REF(inp); - SCTP_INP_WUNLOCK(inp); - SCTP_INP_INFO_WUNLOCK(); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error); - return (error); + goto out; } first = MODULE_GLOBAL(ipport_lowfirstauto); last = MODULE_GLOBAL(ipport_lowlastauto); } else { -#endif first = MODULE_GLOBAL(ipport_firstauto); last = MODULE_GLOBAL(ipport_lastauto); -#if defined(__FreeBSD__) || defined(__APPLE__) } #endif -#endif /* __Windows__ */ if (first > last) { uint16_t temp; @@ -3611,8 +3569,7 @@ sctp_inpcb_bind(struct socket *so, struct sockaddr *addr, count = last - first + 1; /* number of candidates */ candidate = first + sctp_select_initial_TSN(&inp->sctp_ep) % (count); - done = 0; - while (!done) { + for (;;) { #ifdef SCTP_MVRF for (i = 0; i < inp->num_vrfs; i++) { if (sctp_isport_inuse(inp, htons(candidate), inp->m_vrf_ids[i]) != NULL) { @@ -3620,40 +3577,35 @@ sctp_inpcb_bind(struct socket *so, struct sockaddr *addr, } } if (i == inp->num_vrfs) { - done = 1; + lport = htons(candidate); + break; } #else if (sctp_isport_inuse(inp, htons(candidate), inp->def_vrf_id) == NULL) { - done = 1; + lport = htons(candidate); + break; } #endif - if (!done) { - if (--count == 0) { - SCTP_INP_DECR_REF(inp); - SCTP_INP_WUNLOCK(inp); - SCTP_INP_INFO_WUNLOCK(); - SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EADDRINUSE); - return (EADDRINUSE); - } - if (candidate == last) - candidate = first; - else - candidate = candidate + 1; + if (--count == 0) { + error = EADDRINUSE; + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error); + goto out; } + if (candidate == last) + candidate = first; + else + candidate = candidate + 1; } - lport = htons(candidate); } - SCTP_INP_DECR_REF(inp); if (inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { /* * this really should not happen. The guy did a non-blocking * bind and then did a close at the same time. */ - SCTP_INP_WUNLOCK(inp); - SCTP_INP_INFO_WUNLOCK(); - SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); - return (EINVAL); + error = EINVAL; + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error); + goto out; } /* ok we look clear to give out this port, so lets setup the binding */ if (bindall) { @@ -3739,27 +3691,25 @@ sctp_inpcb_bind(struct socket *so, struct sockaddr *addr, } else { /* Note for BSD we hit here always other * O/S's will pass things in via the - * sctp_ifap argument (Panda). + * sctp_ifap argument. 
*/ ifa = sctp_find_ifa_by_addr(&store.sa, vrf_id, SCTP_ADDR_NOT_LOCKED); } if (ifa == NULL) { + error = EADDRNOTAVAIL; /* Can't find an interface with that address */ - SCTP_INP_WUNLOCK(inp); - SCTP_INP_INFO_WUNLOCK(); - SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EADDRNOTAVAIL); - return (EADDRNOTAVAIL); + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error); + goto out; } #ifdef INET6 if (addr->sa_family == AF_INET6) { /* GAK, more FIXME IFA lock? */ if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) { /* Can't bind a non-existent addr. */ - SCTP_INP_WUNLOCK(inp); - SCTP_INP_INFO_WUNLOCK(); - SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); - return (EINVAL); + error = EINVAL; + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error); + goto out; } } #endif @@ -3772,11 +3722,8 @@ sctp_inpcb_bind(struct socket *so, struct sockaddr *addr, /* add this address to the endpoint list */ error = sctp_insert_laddr(&inp->sctp_addr_list, ifa, 0); - if (error != 0) { - SCTP_INP_WUNLOCK(inp); - SCTP_INP_INFO_WUNLOCK(); - return (error); - } + if (error != 0) + goto out; inp->laddr_count++; } /* find the bucket */ @@ -3795,12 +3742,40 @@ sctp_inpcb_bind(struct socket *so, struct sockaddr *addr, inp->sctp_lport = lport; /* turn off just the unbound flag */ + KASSERT((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) != 0, + ("%s: inp %p is already bound", __func__, inp)); inp->sctp_flags &= ~SCTP_PCB_FLAGS_UNBOUND; - SCTP_INP_WUNLOCK(inp); - SCTP_INP_INFO_WUNLOCK(); - return (0); +out: + return (error); } +int +#if defined(__FreeBSD__) && !defined(__Userspace__) +sctp_inpcb_bind(struct socket *so, struct sockaddr *addr, + struct sctp_ifa *sctp_ifap, struct thread *td) +#elif defined(_WIN32) && !defined(__Userspace__) +sctp_inpcb_bind(struct socket *so, struct sockaddr *addr, + struct sctp_ifa *sctp_ifap, PKTHREAD p) +#else +sctp_inpcb_bind(struct socket *so, struct sockaddr *addr, + struct sctp_ifa *sctp_ifap, struct proc *p) +#endif +{ + struct sctp_inpcb *inp; + int error; + + inp = so->so_pcb; + SCTP_INP_INFO_WLOCK(); + SCTP_INP_WLOCK(inp); +#if defined(__FreeBSD__) && !defined(__Userspace__) + error = sctp_inpcb_bind_locked(inp, addr, sctp_ifap, td); +#else + error = sctp_inpcb_bind_locked(inp, addr, sctp_ifap, p); +#endif + SCTP_INP_WUNLOCK(inp); + SCTP_INP_INFO_WUNLOCK(); + return (error); +} static void sctp_iterator_inp_being_freed(struct sctp_inpcb *inp) @@ -3812,7 +3787,7 @@ sctp_iterator_inp_being_freed(struct sctp_inpcb *inp) * lock on the inp_info stuff. 
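With the rewrite above, all of the real bind work lives in sctp_inpcb_bind_locked, which reports every failure through the single out: label, while sctp_inpcb_bind shrinks to taking the global info lock and the PCB write lock, delegating, and releasing them in reverse order. A compressed sketch of that locked-core/wrapper split; the types are stand-ins and pthread mutexes stand in for the kernel locks:

#include <errno.h>
#include <pthread.h>

/* Placeholder global lock standing in for the INP-info write lock. */
static pthread_mutex_t info_lock = PTHREAD_MUTEX_INITIALIZER;

struct pcb {
    pthread_mutex_t lock;
    int bound;              /* stand-in for the UNBOUND flag bookkeeping */
    unsigned short lport;
};

/* Core: caller holds both locks; every failure funnels through "out". */
static int
pcb_bind_locked(struct pcb *p, unsigned short port)
{
    int error = 0;

    if (p->bound) {
        error = EINVAL;     /* a second bind is not allowed */
        goto out;
    }
    p->lport = port;
    p->bound = 1;
out:
    return (error);
}

/* Wrapper: acquire, delegate, release in reverse order. */
int
pcb_bind(struct pcb *p, unsigned short port)
{
    int error;

    pthread_mutex_lock(&info_lock);
    pthread_mutex_lock(&p->lock);
    error = pcb_bind_locked(p, port);
    pthread_mutex_unlock(&p->lock);
    pthread_mutex_unlock(&info_lock);
    return (error);
}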
*/ it = sctp_it_ctl.cur_it; -#if defined(__FreeBSD__) && __FreeBSD_version >= 801000 +#if defined(__FreeBSD__) && !defined(__Userspace__) if (it && (it->vn != curvnet)) { /* Its not looking at our VNET */ return; @@ -3843,7 +3818,7 @@ sctp_iterator_inp_being_freed(struct sctp_inpcb *inp) */ SCTP_IPI_ITERATOR_WQ_LOCK(); TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) { -#if defined(__FreeBSD__) && __FreeBSD_version >= 801000 +#if defined(__FreeBSD__) && !defined(__Userspace__) if (it->vn != curvnet) { continue; } @@ -3889,16 +3864,15 @@ sctp_inpcb_free(struct sctp_inpcb *inp, int immediate, int from) struct socket *so; int being_refed = 0; struct sctp_queued_to_read *sq, *nsq; -#if !defined(__Panda__) && !defined(__Userspace__) -#if !defined(__FreeBSD__) || __FreeBSD_version < 500000 +#if !defined(__Userspace__) +#if !defined(__FreeBSD__) sctp_rtentry_t *rt; #endif #endif int cnt; sctp_sharedkey_t *shared_key, *nshared_key; - -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) sctp_lock_assert(SCTP_INP_SO(inp)); #endif #ifdef SCTP_LOG_CLOSING @@ -3908,30 +3882,25 @@ sctp_inpcb_free(struct sctp_inpcb *inp, int immediate, int from) /* mark any iterators on the list or being processed */ sctp_iterator_inp_being_freed(inp); SCTP_ITERATOR_UNLOCK(); - so = inp->sctp_socket; - if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) { - /* been here before.. eeks.. get out of here */ - SCTP_PRINTF("This conflict in free SHOULD not be happening! from %d, imm %d\n", from, immediate); -#ifdef SCTP_LOG_CLOSING - sctp_log_closing(inp, NULL, 1); -#endif - return; - } + SCTP_ASOC_CREATE_LOCK(inp); SCTP_INP_INFO_WLOCK(); - SCTP_INP_WLOCK(inp); + so = inp->sctp_socket; + KASSERT((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) != 0, + ("%s: inp %p still has socket", __func__, inp)); + KASSERT((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) == 0, + ("%s: double free of inp %p", __func__, inp)); if (from == SCTP_CALLED_AFTER_CMPSET_OFCLOSE) { inp->sctp_flags &= ~SCTP_PCB_FLAGS_CLOSE_IP; /* socket is gone, so no more wakeups allowed */ inp->sctp_flags |= SCTP_PCB_FLAGS_DONT_WAKE; inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEINPUT; inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEOUTPUT; - } /* First time through we have the socket lock, after that no more. 
*/ sctp_timer_stop(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL, - SCTP_FROM_SCTP_PCB + SCTP_LOC_1); + SCTP_FROM_SCTP_PCB + SCTP_LOC_1); if (inp->control) { sctp_m_freem(inp->control); @@ -3951,6 +3920,7 @@ sctp_inpcb_free(struct sctp_inpcb *inp, int immediate, int from) LIST_FOREACH_SAFE(asoc, &inp->sctp_asoc_list, sctp_tcblist, nasoc) { SCTP_TCB_LOCK(asoc); if (asoc->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { + asoc->sctp_socket = NULL; /* Skip guys being freed */ cnt_in_sd++; if (asoc->asoc.state & SCTP_STATE_IN_ACCEPT_QUEUE) { @@ -4031,15 +4001,13 @@ sctp_inpcb_free(struct sctp_inpcb *inp, int immediate, int from) sctp_send_shutdown(asoc, netp); sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, asoc->sctp_ep, asoc, netp); - sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, asoc->sctp_ep, asoc, - asoc->asoc.primary_destination); + sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, asoc->sctp_ep, asoc, NULL); sctp_chunk_output(inp, asoc, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_LOCKED); } } else { /* mark into shutdown pending */ SCTP_ADD_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); - sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, asoc->sctp_ep, asoc, - asoc->asoc.primary_destination); + sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, asoc->sctp_ep, asoc, NULL); if ((*asoc->asoc.ss_functions.sctp_ss_is_user_msgs_incomplete)(asoc, &asoc->asoc)) { SCTP_ADD_SUBSTATE(asoc, SCTP_STATE_PARTIAL_MSG_LEFT); } @@ -4101,12 +4069,17 @@ sctp_inpcb_free(struct sctp_inpcb *inp, int immediate, int from) cnt = 0; LIST_FOREACH_SAFE(asoc, &inp->sctp_asoc_list, sctp_tcblist, nasoc) { SCTP_TCB_LOCK(asoc); + if (immediate != SCTP_FREE_SHOULD_USE_GRACEFUL_CLOSE) { + /* Disconnect the socket please */ + asoc->sctp_socket = NULL; + SCTP_ADD_SUBSTATE(asoc, SCTP_STATE_CLOSED_SOCKET); + } if (asoc->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { if (asoc->asoc.state & SCTP_STATE_IN_ACCEPT_QUEUE) { SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_IN_ACCEPT_QUEUE); sctp_timer_start(SCTP_TIMER_TYPE_ASOCKILL, inp, asoc, NULL); } - cnt++; + cnt++; SCTP_TCB_UNLOCK(asoc); continue; } @@ -4135,7 +4108,6 @@ sctp_inpcb_free(struct sctp_inpcb *inp, int immediate, int from) } if (cnt) { /* Ok we have someone out there that will kill us */ - (void)SCTP_OS_TIMER_STOP(&inp->sctp_ep.signature_change.timer); #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, NULL, 3); #endif @@ -4150,11 +4122,10 @@ sctp_inpcb_free(struct sctp_inpcb *inp, int immediate, int from) being_refed++; if (SCTP_ASOC_CREATE_LOCK_CONTENDED(inp)) being_refed++; - + /* NOTE: 0 refcount also means no timers are referencing us. */ if ((inp->refcount) || (being_refed) || (inp->sctp_flags & SCTP_PCB_FLAGS_CLOSE_IP)) { - (void)SCTP_OS_TIMER_STOP(&inp->sctp_ep.signature_change.timer); #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, NULL, 4); #endif @@ -4173,30 +4144,15 @@ sctp_inpcb_free(struct sctp_inpcb *inp, int immediate, int from) SCTP_INP_WUNLOCK(inp); SCTP_ASOC_CREATE_UNLOCK(inp); SCTP_INP_INFO_WUNLOCK(); - /* Now we release all locks. Since this INP - * cannot be found anymore except possibly by the - * kill timer that might be running. We call - * the drain function here. It should hit the case - * were it sees the ACTIVE flag cleared and exit - * out freeing us to proceed and destroy everything. 
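The teardown path above drops the explicit stop/drain of the signature_change timer and leans on reference counting instead: per the note added in the hunk, a zero refcount already means no timer is referencing the endpoint, so destruction only proceeds once the refcount and the lock-contention checks all come up empty, and the old runtime "been here before" guard becomes a KASSERT. A small model of that refcount-gated teardown, with stand-in fields:

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

/* Stand-in endpoint: timers, readers and iterators all hold references. */
struct ep_stub {
    atomic_int refcount;
    bool allgone;           /* stand-in for the SOCKET_ALLGONE flag */
};

/*
 * Attempt the final teardown.  Armed timers take a reference when started
 * and drop it when stopped or fired, so refcount == 0 already implies no
 * timer can call back into this endpoint.
 */
static bool
ep_try_teardown(struct ep_stub *ep)
{
    assert(!ep->allgone && "double free of endpoint");
    if (atomic_load(&ep->refcount) != 0) {
        return (false);     /* a timer, reader or iterator still holds us */
    }
    ep->allgone = true;     /* mark gone, then release the resources */
    return (true);
}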
- */ - if (from != SCTP_CALLED_FROM_INPKILL_TIMER) { - (void)SCTP_OS_TIMER_STOP_DRAIN(&inp->sctp_ep.signature_change.timer); - } else { - /* Probably un-needed */ - (void)SCTP_OS_TIMER_STOP(&inp->sctp_ep.signature_change.timer); - } #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, NULL, 5); #endif - -#if !(defined(__Panda__) || defined(__Windows__) || defined(__Userspace__)) -#if !defined(__FreeBSD__) || __FreeBSD_version < 500000 +#if !(defined(_WIN32) || defined(__Userspace__)) +#if !(defined(__FreeBSD__) && !defined(__Userspace__)) rt = ip_pcb->inp_route.ro_rt; #endif #endif - if ((inp->sctp_asocidhash) != NULL) { SCTP_HASH_FREE(inp->sctp_asocidhash, inp->hashasocidmark); inp->sctp_asocidhash = NULL; @@ -4227,33 +4183,21 @@ sctp_inpcb_free(struct sctp_inpcb *inp, int immediate, int from) * macro here since le_next will get freed as part of the * sctp_free_assoc() call. */ -#ifndef __Panda__ if (ip_pcb->inp_options) { (void)sctp_m_free(ip_pcb->inp_options); ip_pcb->inp_options = 0; } -#endif - -#if !(defined(__Panda__) || defined(__Windows__) || defined(__Userspace__)) -#if !defined(__FreeBSD__) || __FreeBSD_version < 500000 +#if !(defined(_WIN32) || defined(__Userspace__)) +#if !defined(__FreeBSD__) if (rt) { RTFREE(rt); ip_pcb->inp_route.ro_rt = 0; } #endif -#if defined(__FreeBSD__) && __FreeBSD_version < 803000 -#ifdef INET - if (ip_pcb->inp_moptions) { - inp_freemoptions(ip_pcb->inp_moptions); - ip_pcb->inp_moptions = 0; - } #endif -#endif -#endif - #ifdef INET6 -#if !(defined(__Panda__) || defined(__Windows__) || defined(__Userspace__)) -#if defined(__FreeBSD__) || defined(__APPLE__) +#if !(defined(_WIN32) || defined(__Userspace__)) +#if (defined(__FreeBSD__) || defined(__APPLE__) && !defined(__Userspace__)) if (ip_pcb->inp_vflag & INP_IPV6) { #else if (inp->inp_vflag & INP_IPV6) { @@ -4262,11 +4206,7 @@ sctp_inpcb_free(struct sctp_inpcb *inp, int immediate, int from) } #endif #endif /* INET6 */ -#if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) || defined(__Userspace__)) - inp->inp_vflag = 0; -#else ip_pcb->inp_vflag = 0; -#endif /* free up authentication fields */ if (inp->sctp_ep.local_auth_chunks != NULL) sctp_free_chunklist(inp->sctp_ep.local_auth_chunks); @@ -4279,7 +4219,7 @@ sctp_inpcb_free(struct sctp_inpcb *inp, int immediate, int from) /*sa_ignore FREED_MEMORY*/ } -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) inp->ip_inp.inp.inp_state = INPCB_STATE_DEAD; if (in_pcb_checkstate(&inp->ip_inp.inp, WNT_STOPUSING, 1) != WNT_STOPUSING) { #ifdef INVARIANTS @@ -4317,14 +4257,14 @@ sctp_inpcb_free(struct sctp_inpcb *inp, int immediate, int from) inp->sctp_tcbhash = NULL; } /* Now we must put the ep memory back into the zone pool */ -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) crfree(inp->ip_inp.inp.inp_cred); INP_LOCK_DESTROY(&inp->ip_inp.inp); #endif SCTP_INP_LOCK_DESTROY(inp); SCTP_INP_READ_DESTROY(inp); SCTP_ASOC_CREATE_LOCK_DESTROY(inp); -#if !defined(__APPLE__) +#if !(defined(__APPLE__) && !defined(__Userspace__)) SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_ep), inp); SCTP_DECR_EP_COUNT(); #else @@ -4332,7 +4272,6 @@ sctp_inpcb_free(struct sctp_inpcb *inp, int immediate, int from) #endif } - struct sctp_nets * sctp_findnet(struct sctp_tcb *stcb, struct sockaddr *addr) { @@ -4345,13 +4284,9 @@ sctp_findnet(struct sctp_tcb *stcb, struct sockaddr *addr) return (NULL); } - int sctp_is_address_on_local_host(struct sockaddr *addr, uint32_t vrf_id) { -#ifdef __Panda__ - return (0); -#else struct sctp_ifa 
*sctp_ifa; sctp_ifa = sctp_find_ifa_by_addr(addr, vrf_id, SCTP_ADDR_NOT_LOCKED); if (sctp_ifa) { @@ -4359,7 +4294,6 @@ sctp_is_address_on_local_host(struct sockaddr *addr, uint32_t vrf_id) } else { return (0); } -#endif } /* @@ -4608,7 +4542,7 @@ sctp_add_remote_addr(struct sctp_tcb *stcb, struct sockaddr *newaddr, struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)&net->ro._l_addr; -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD) (void)in6_embedscope(&sin6->sin6_addr, sin6, &stcb->sctp_ep->ip_inp.inp, NULL); #else @@ -4675,18 +4609,22 @@ sctp_add_remote_addr(struct sctp_tcb *stcb, struct sockaddr *newaddr, } else { imtu = 0; } - rmtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, net->ro.ro_rt); -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) + rmtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, net->ro.ro_nh); hcmtu = sctp_hc_get_mtu(&net->ro._l_addr, stcb->sctp_ep->fibnum); #else + rmtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, net->ro.ro_rt); hcmtu = 0; #endif net->mtu = sctp_min_mtu(hcmtu, rmtu, imtu); +#if defined(__FreeBSD__) && !defined(__Userspace__) +#else if (rmtu == 0) { /* Start things off to match mtu of interface please. */ SCTP_SET_MTU_OF_ROUTE(&net->ro._l_addr.sa, net->ro.ro_rt, net->mtu); } +#endif } } #endif @@ -4775,7 +4713,7 @@ sctp_add_remote_addr(struct sctp_tcb *stcb, struct sockaddr *newaddr, */ net->find_pseudo_cumack = 1; net->find_rtx_pseudo_cumack = 1; -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) /* Choose an initial flowid. */ net->flowid = stcb->asoc.my_vtag ^ ntohs(stcb->rport) ^ @@ -4786,26 +4724,36 @@ sctp_add_remote_addr(struct sctp_tcb *stcb, struct sockaddr *newaddr, *netp = net; } netfirst = TAILQ_FIRST(&stcb->asoc.nets); +#if defined(__FreeBSD__) && !defined(__Userspace__) + if (net->ro.ro_nh == NULL) { +#else if (net->ro.ro_rt == NULL) { +#endif /* Since we have no route put it at the back */ TAILQ_INSERT_TAIL(&stcb->asoc.nets, net, sctp_next); } else if (netfirst == NULL) { /* We are the first one in the pool. */ TAILQ_INSERT_HEAD(&stcb->asoc.nets, net, sctp_next); +#if defined(__FreeBSD__) && !defined(__Userspace__) + } else if (netfirst->ro.ro_nh == NULL) { +#else } else if (netfirst->ro.ro_rt == NULL) { +#endif /* * First one has NO route. Place this one ahead of the first * one. */ TAILQ_INSERT_HEAD(&stcb->asoc.nets, net, sctp_next); -#ifndef __Panda__ +#if defined(__FreeBSD__) && !defined(__Userspace__) + } else if (net->ro.ro_nh->nh_ifp != netfirst->ro.ro_nh->nh_ifp) { +#else } else if (net->ro.ro_rt->rt_ifp != netfirst->ro.ro_rt->rt_ifp) { +#endif /* * This one has a different interface than the one at the * top of the list. Place it ahead. */ TAILQ_INSERT_HEAD(&stcb->asoc.nets, net, sctp_next); -#endif } else { /* * Ok we have the same interface as the first one. 
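Most of the hunks above are the same mechanical substitution: on current FreeBSD the cached path hangs off net->ro.ro_nh (a nexthop) rather than net->ro.ro_rt (an rtentry), so every NULL test and interface comparison gets wrapped in the same conditional. A self-contained illustration of hiding that split behind one accessor instead of repeating the #if at each call site; the struct names are stand-ins, only the field names come from the hunks:

#include <stddef.h>

/* Stand-in types; ro_nh/nh_ifp and ro_rt/rt_ifp mirror the fields used above. */
struct ifnet_stub { int if_index; };
struct nhop_stub  { struct ifnet_stub *nh_ifp; };
struct rtent_stub { struct ifnet_stub *rt_ifp; };

struct route_stub {
#if defined(__FreeBSD__) && !defined(__Userspace__)
    struct nhop_stub  *ro_nh;
#else
    struct rtent_stub *ro_rt;
#endif
};

/* One helper instead of repeating the conditional at every comparison site. */
static struct ifnet_stub *
route_ifp(const struct route_stub *ro)
{
#if defined(__FreeBSD__) && !defined(__Userspace__)
    return (ro->ro_nh != NULL) ? ro->ro_nh->nh_ifp : NULL;
#else
    return (ro->ro_rt != NULL) ? ro->ro_rt->rt_ifp : NULL;
#endif
}

With a helper like this, the insertion policy in the surrounding code (no cached path goes to the tail of the net list, a different interface goes ahead of the head) would read identically on every platform.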
Move @@ -4821,34 +4769,39 @@ sctp_add_remote_addr(struct sctp_tcb *stcb, struct sockaddr *newaddr, /* End of the list */ TAILQ_INSERT_TAIL(&stcb->asoc.nets, net, sctp_next); break; +#if defined(__FreeBSD__) && !defined(__Userspace__) + } else if (netlook->ro.ro_nh == NULL) { +#else } else if (netlook->ro.ro_rt == NULL) { +#endif /* next one has NO route */ TAILQ_INSERT_BEFORE(netfirst, net, sctp_next); break; - } -#ifndef __Panda__ - else if (netlook->ro.ro_rt->rt_ifp != net->ro.ro_rt->rt_ifp) +#if defined(__FreeBSD__) && !defined(__Userspace__) + } else if (netlook->ro.ro_nh->nh_ifp != net->ro.ro_nh->nh_ifp) { #else - else + } else if (netlook->ro.ro_rt->rt_ifp != net->ro.ro_rt->rt_ifp) { #endif - { TAILQ_INSERT_AFTER(&stcb->asoc.nets, netlook, - net, sctp_next); + net, sctp_next); break; } -#ifndef __Panda__ /* Shift forward */ netfirst = netlook; -#endif } while (netlook != NULL); } /* got to have a primary set */ if (stcb->asoc.primary_destination == 0) { stcb->asoc.primary_destination = net; +#if defined(__FreeBSD__) && !defined(__Userspace__) + } else if ((stcb->asoc.primary_destination->ro.ro_nh == NULL) && + (net->ro.ro_nh) && +#else } else if ((stcb->asoc.primary_destination->ro.ro_rt == NULL) && - (net->ro.ro_rt) && - ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0)) { + (net->ro.ro_rt) && +#endif + ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0)) { /* No route to current primary adopt new primary */ stcb->asoc.primary_destination = net; } @@ -4869,7 +4822,6 @@ sctp_add_remote_addr(struct sctp_tcb *stcb, struct sockaddr *newaddr, return (0); } - static uint32_t sctp_aloc_a_assoc_id(struct sctp_inpcb *inp, struct sctp_tcb *stcb) { @@ -4906,21 +4858,21 @@ sctp_aloc_a_assoc_id(struct sctp_inpcb *inp, struct sctp_tcb *stcb) * careful to add all additional addresses once they are know right away or * else the assoc will be may experience a blackout scenario. */ -struct sctp_tcb * -sctp_aloc_assoc(struct sctp_inpcb *inp, struct sockaddr *firstaddr, - int *error, uint32_t override_tag, uint32_t vrf_id, - uint16_t o_streams, uint16_t port, -#if defined(__FreeBSD__) && __FreeBSD_version >= 500000 - struct thread *p, -#elif defined(__Windows__) - PKTHREAD p, +static struct sctp_tcb * +sctp_aloc_assoc_locked(struct sctp_inpcb *inp, struct sockaddr *firstaddr, + int *error, uint32_t override_tag, uint32_t initial_tsn, + uint32_t vrf_id, uint16_t o_streams, uint16_t port, +#if defined(__FreeBSD__) && !defined(__Userspace__) + struct thread *p, +#elif defined(_WIN32) && !defined(__Userspace__) + PKTHREAD p, #else #if defined(__Userspace__) - /* __Userspace__ NULL proc is going to be passed here. See sctp_lower_sosend */ + /* __Userspace__ NULL proc is going to be passed here. 
See sctp_lower_sosend */ #endif - struct proc *p, + struct proc *p, #endif - int initialize_auth_params) + int initialize_auth_params) { /* note the p argument is only valid in unbound sockets */ @@ -4930,6 +4882,9 @@ sctp_aloc_assoc(struct sctp_inpcb *inp, struct sockaddr *firstaddr, uint16_t rport; int err; + SCTP_INP_INFO_WLOCK_ASSERT(); + SCTP_INP_WLOCK_ASSERT(inp); + /* * Assumption made here: Caller has done a * sctp_findassociation_ep_addr(ep, addr's); to make sure the @@ -4946,7 +4901,11 @@ sctp_aloc_assoc(struct sctp_inpcb *inp, struct sockaddr *firstaddr, *error = EINVAL; return (NULL); } - SCTP_INP_RLOCK(inp); + if (inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); + *error = EINVAL; + return (NULL); + } if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) && ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE)) || (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) { @@ -4956,7 +4915,6 @@ sctp_aloc_assoc(struct sctp_inpcb *inp, struct sockaddr *firstaddr, * sctp_aloc_assoc.. or the one-2-many socket. If a peeled * off, or connected one does this.. its an error. */ - SCTP_INP_RUNLOCK(inp); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); *error = EINVAL; return (NULL); @@ -4965,7 +4923,6 @@ sctp_aloc_assoc(struct sctp_inpcb *inp, struct sockaddr *firstaddr, (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)) { if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) || (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED)) { - SCTP_INP_RUNLOCK(inp); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); *error = EINVAL; return (NULL); @@ -5011,9 +4968,16 @@ sctp_aloc_assoc(struct sctp_inpcb *inp, struct sockaddr *firstaddr, if ((ntohs(sin->sin_port) == 0) || (sin->sin_addr.s_addr == INADDR_ANY) || (sin->sin_addr.s_addr == INADDR_BROADCAST) || - IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) { + IN_MULTICAST(ntohl(sin->sin_addr.s_addr)) || +#if defined(__Userspace__) + (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) != 0) || + (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) != 0) && + (SCTP_IPV6_V6ONLY(inp) != 0)))) { +#else + (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) != 0) && + (SCTP_IPV6_V6ONLY(inp) != 0))) { +#endif /* Invalid address */ - SCTP_INP_RUNLOCK(inp); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); *error = EINVAL; return (NULL); @@ -5030,9 +4994,9 @@ sctp_aloc_assoc(struct sctp_inpcb *inp, struct sockaddr *firstaddr, sin6 = (struct sockaddr_in6 *)firstaddr; if ((ntohs(sin6->sin6_port) == 0) || IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) || - IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) { + IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr) || + ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0)) { /* Invalid address */ - SCTP_INP_RUNLOCK(inp); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); *error = EINVAL; return (NULL); @@ -5048,9 +5012,9 @@ sctp_aloc_assoc(struct sctp_inpcb *inp, struct sockaddr *firstaddr, sconn = (struct sockaddr_conn *)firstaddr; if ((ntohs(sconn->sconn_port) == 0) || - (sconn->sconn_addr == NULL)) { + (sconn->sconn_addr == NULL) || + ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) == 0)) { /* Invalid address */ - SCTP_INP_RUNLOCK(inp); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); *error = EINVAL; return (NULL); @@ -5061,26 +5025,16 @@ sctp_aloc_assoc(struct sctp_inpcb *inp, struct sockaddr *firstaddr, #endif default: /* not supported family type */ - SCTP_INP_RUNLOCK(inp); SCTP_LTRACE_ERR_RET(inp, NULL, 
NULL, SCTP_FROM_SCTP_PCB, EINVAL); *error = EINVAL; return (NULL); } - SCTP_INP_RUNLOCK(inp); if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { /* * If you have not performed a bind, then we need to do the * ephemeral bind for you. */ - if ((err = sctp_inpcb_bind(inp->sctp_socket, - (struct sockaddr *)NULL, - (struct sctp_ifa *)NULL, -#ifndef __Panda__ - p -#else - (struct proc *)NULL -#endif - ))) { + if ((err = sctp_inpcb_bind_locked(inp, NULL, NULL, p))) { /* bind error, probably perm */ *error = err; return (NULL); @@ -5104,7 +5058,7 @@ sctp_aloc_assoc(struct sctp_inpcb *inp, struct sockaddr *firstaddr, /* setup back pointer's */ stcb->sctp_ep = inp; stcb->sctp_socket = inp->sctp_socket; - if ((err = sctp_init_asoc(inp, stcb, override_tag, vrf_id, o_streams))) { + if ((err = sctp_init_asoc(inp, stcb, override_tag, initial_tsn, vrf_id, o_streams))) { /* failed */ SCTP_TCB_LOCK_DESTROY(stcb); SCTP_TCB_SEND_LOCK_DESTROY(stcb); @@ -5113,21 +5067,6 @@ sctp_aloc_assoc(struct sctp_inpcb *inp, struct sockaddr *firstaddr, *error = err; return (NULL); } - /* and the port */ - SCTP_INP_INFO_WLOCK(); - SCTP_INP_WLOCK(inp); - if (inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { - /* inpcb freed while alloc going on */ - SCTP_TCB_LOCK_DESTROY(stcb); - SCTP_TCB_SEND_LOCK_DESTROY(stcb); - SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), stcb); - SCTP_INP_WUNLOCK(inp); - SCTP_INP_INFO_WUNLOCK(); - SCTP_DECR_ASOC_COUNT(); - SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); - *error = EINVAL; - return (NULL); - } SCTP_TCB_LOCK(stcb); asoc->assoc_id = sctp_aloc_a_assoc_id(inp, stcb); @@ -5135,9 +5074,8 @@ sctp_aloc_assoc(struct sctp_inpcb *inp, struct sockaddr *firstaddr, head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))]; /* put it in the bucket in the vtag hash of assoc's for the system */ LIST_INSERT_HEAD(head, stcb, sctp_asocs); - SCTP_INP_INFO_WUNLOCK(); - if ((err = sctp_add_remote_addr(stcb, firstaddr, NULL, port, SCTP_DO_SETSCOPE, SCTP_ALLOC_ASOC))) { + if (sctp_add_remote_addr(stcb, firstaddr, NULL, port, SCTP_DO_SETSCOPE, SCTP_ALLOC_ASOC)) { /* failure.. memory error? 
*/ if (asoc->strmout) { SCTP_FREE(asoc->strmout, SCTP_M_STRMO); @@ -5155,6 +5093,7 @@ sctp_aloc_assoc(struct sctp_inpcb *inp, struct sockaddr *firstaddr, SCTP_TCB_UNLOCK(stcb); SCTP_TCB_LOCK_DESTROY(stcb); SCTP_TCB_SEND_LOCK_DESTROY(stcb); + LIST_REMOVE(stcb, sctp_asocs); LIST_REMOVE(stcb, sctp_tcbasocidhash); SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), stcb); SCTP_INP_WUNLOCK(inp); @@ -5168,7 +5107,6 @@ sctp_aloc_assoc(struct sctp_inpcb *inp, struct sockaddr *firstaddr, SCTP_OS_TIMER_INIT(&asoc->asconf_timer.timer); SCTP_OS_TIMER_INIT(&asoc->shut_guard_timer.timer); SCTP_OS_TIMER_INIT(&asoc->autoclose_timer.timer); - SCTP_OS_TIMER_INIT(&asoc->delayed_event_timer.timer); SCTP_OS_TIMER_INIT(&asoc->delete_prim_timer.timer); LIST_INSERT_HEAD(&inp->sctp_asoc_list, stcb, sctp_tcblist); @@ -5181,17 +5119,76 @@ sctp_aloc_assoc(struct sctp_inpcb *inp, struct sockaddr *firstaddr, if (initialize_auth_params == SCTP_INITIALIZE_AUTH_PARAMS) { sctp_initialize_auth_params(inp, stcb); } - SCTP_INP_WUNLOCK(inp); SCTPDBG(SCTP_DEBUG_PCB1, "Association %p now allocated\n", (void *)stcb); return (stcb); } +struct sctp_tcb * +sctp_aloc_assoc(struct sctp_inpcb *inp, struct sockaddr *firstaddr, + int *error, uint32_t override_tag, uint32_t initial_tsn, + uint32_t vrf_id, uint16_t o_streams, uint16_t port, +#if defined(__FreeBSD__) && !defined(__Userspace__) + struct thread *p, +#elif defined(_WIN32) && !defined(__Userspace__) + PKTHREAD p, +#else + struct proc *p, +#endif + int initialize_auth_params) +{ + struct sctp_tcb *stcb; + + SCTP_INP_INFO_WLOCK(); + SCTP_INP_WLOCK(inp); + stcb = sctp_aloc_assoc_locked(inp, firstaddr, error, override_tag, + initial_tsn, vrf_id, o_streams, port, p, initialize_auth_params); + SCTP_INP_INFO_WUNLOCK(); + SCTP_INP_WUNLOCK(inp); + return (stcb); +} + +struct sctp_tcb * +sctp_aloc_assoc_connected(struct sctp_inpcb *inp, struct sockaddr *firstaddr, + int *error, uint32_t override_tag, uint32_t initial_tsn, + uint32_t vrf_id, uint16_t o_streams, uint16_t port, +#if defined(__FreeBSD__) && !defined(__Userspace__) + struct thread *p, +#elif defined(_WIN32) && !defined(__Userspace__) + PKTHREAD p, +#else + struct proc *p, +#endif + int initialize_auth_params) +{ + struct sctp_tcb *stcb; + + SCTP_INP_INFO_WLOCK(); + SCTP_INP_WLOCK(inp); + if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && + SCTP_IS_LISTENING(inp)) { + SCTP_INP_INFO_WUNLOCK(); + SCTP_INP_WUNLOCK(inp); + *error = EINVAL; + return (NULL); + } + stcb = sctp_aloc_assoc_locked(inp, firstaddr, error, override_tag, + initial_tsn, vrf_id, o_streams, port, p, initialize_auth_params); + SCTP_INP_INFO_WUNLOCK(); + if (stcb != NULL && (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)) { + inp->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED; + soisconnecting(inp->sctp_socket); + } + SCTP_INP_WUNLOCK(inp); + return (stcb); +} void sctp_remove_net(struct sctp_tcb *stcb, struct sctp_nets *net) { + struct sctp_inpcb *inp; struct sctp_association *asoc; + inp = stcb->sctp_ep; asoc = &stcb->asoc; asoc->numnets--; TAILQ_REMOVE(&asoc->nets, net, sctp_next); @@ -5235,10 +5232,19 @@ out: /* Clear net */ asoc->last_control_chunk_from = NULL; } + if (net == asoc->last_net_cmt_send_started) { + /* Clear net */ + asoc->last_net_cmt_send_started = NULL; + } if (net == stcb->asoc.alternate) { sctp_free_remote_addr(stcb->asoc.alternate); stcb->asoc.alternate = NULL; } + sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net, + SCTP_FROM_SCTP_PCB + SCTP_LOC_9); + sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, + SCTP_FROM_SCTP_PCB + SCTP_LOC_10); 
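sctp_aloc_assoc_connected above is more than a locking wrapper around sctp_aloc_assoc_locked: while still holding the locks it refuses to build an association on a listening TCP-style endpoint, and on success it marks the endpoint connected and moves the socket toward the connecting state before the PCB lock is dropped. A compressed model of that shape, with stand-in types and a trivial placeholder allocator:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct assoc_stub { int id; };

struct ep2_stub {
    pthread_mutex_t lock;
    bool tcp_style;         /* stand-in for the TCPTYPE flag */
    bool listening;         /* stand-in for SCTP_IS_LISTENING() */
    bool connected;         /* stand-in for the CONNECTED flag */
};

/* Placeholder for the real locked allocator; assumes ep->lock is held. */
static struct assoc_stub *
alloc_assoc_locked(struct ep2_stub *ep, int *error)
{
    static struct assoc_stub a;

    (void)ep;
    *error = 0;
    return (&a);
}

struct assoc_stub *
alloc_assoc_connected(struct ep2_stub *ep, int *error)
{
    struct assoc_stub *a;

    pthread_mutex_lock(&ep->lock);
    if (ep->tcp_style && ep->listening) {
        pthread_mutex_unlock(&ep->lock);
        *error = EINVAL;    /* cannot connect on a listening socket */
        return (NULL);
    }
    a = alloc_assoc_locked(ep, error);
    if (a != NULL && ep->tcp_style) {
        ep->connected = true;   /* socket transitions to connecting */
    }
    pthread_mutex_unlock(&ep->lock);
    return (a);
}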
+ net->dest_state |= SCTP_ADDR_BEING_DELETED; sctp_free_remote_addr(net); } @@ -5282,100 +5288,70 @@ sctp_del_remote_addr(struct sctp_tcb *stcb, struct sockaddr *remaddr) return (-2); } -void -sctp_delete_from_timewait(uint32_t tag, uint16_t lport, uint16_t rport) +static bool +sctp_is_in_timewait(uint32_t tag, uint16_t lport, uint16_t rport, uint32_t now) { struct sctpvtaghead *chain; struct sctp_tagblock *twait_block; - int found = 0; int i; + SCTP_INP_INFO_LOCK_ASSERT(); chain = &SCTP_BASE_INFO(vtag_timewait)[(tag % SCTP_STACK_VTAG_HASH_SIZE)]; LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) { for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) { - if ((twait_block->vtag_block[i].v_tag == tag) && - (twait_block->vtag_block[i].lport == lport) && - (twait_block->vtag_block[i].rport == rport)) { - twait_block->vtag_block[i].tv_sec_at_expire = 0; - twait_block->vtag_block[i].v_tag = 0; - twait_block->vtag_block[i].lport = 0; - twait_block->vtag_block[i].rport = 0; - found = 1; - break; - } - } - if (found) - break; - } -} - -int -sctp_is_in_timewait(uint32_t tag, uint16_t lport, uint16_t rport) -{ - struct sctpvtaghead *chain; - struct sctp_tagblock *twait_block; - int found = 0; - int i; - - SCTP_INP_INFO_WLOCK(); - chain = &SCTP_BASE_INFO(vtag_timewait)[(tag % SCTP_STACK_VTAG_HASH_SIZE)]; - LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) { - for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) { - if ((twait_block->vtag_block[i].v_tag == tag) && - (twait_block->vtag_block[i].lport == lport) && + if ((twait_block->vtag_block[i].tv_sec_at_expire >= now) && + (twait_block->vtag_block[i].v_tag == tag) && + (twait_block->vtag_block[i].lport == lport) && (twait_block->vtag_block[i].rport == rport)) { - found = 1; - break; + return (true); } } - if (found) - break; } - SCTP_INP_INFO_WUNLOCK(); - return (found); + return (false); } +static void +sctp_set_vtag_block(struct sctp_timewait *vtag_block, uint32_t time, + uint32_t tag, uint16_t lport, uint16_t rport) +{ + vtag_block->tv_sec_at_expire = time; + vtag_block->v_tag = tag; + vtag_block->lport = lport; + vtag_block->rport = rport; +} -void -sctp_add_vtag_to_timewait(uint32_t tag, uint32_t time, uint16_t lport, uint16_t rport) +static void +sctp_add_vtag_to_timewait(uint32_t tag, uint16_t lport, uint16_t rport) { struct sctpvtaghead *chain; struct sctp_tagblock *twait_block; struct timeval now; - int set, i; + uint32_t time; + int i; + bool set; - if (time == 0) { - /* Its disabled */ - return; - } + SCTP_INP_INFO_WLOCK_ASSERT(); (void)SCTP_GETTIME_TIMEVAL(&now); + time = (uint32_t)now.tv_sec + SCTP_BASE_SYSCTL(sctp_vtag_time_wait); chain = &SCTP_BASE_INFO(vtag_timewait)[(tag % SCTP_STACK_VTAG_HASH_SIZE)]; - set = 0; + set = false; LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) { /* Block(s) present, lets find space, and expire on the fly */ for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) { - if ((twait_block->vtag_block[i].v_tag == 0) && - !set) { - twait_block->vtag_block[i].tv_sec_at_expire = - now.tv_sec + time; - twait_block->vtag_block[i].v_tag = tag; - twait_block->vtag_block[i].lport = lport; - twait_block->vtag_block[i].rport = rport; - set = 1; - } else if ((twait_block->vtag_block[i].v_tag) && - ((long)twait_block->vtag_block[i].tv_sec_at_expire < now.tv_sec)) { - /* Audit expires this guy */ - twait_block->vtag_block[i].tv_sec_at_expire = 0; - twait_block->vtag_block[i].v_tag = 0; - twait_block->vtag_block[i].lport = 0; - twait_block->vtag_block[i].rport = 0; - if (set == 0) { - /* Reuse it for my new tag */ - 
twait_block->vtag_block[i].tv_sec_at_expire = now.tv_sec + time; - twait_block->vtag_block[i].v_tag = tag; - twait_block->vtag_block[i].lport = lport; - twait_block->vtag_block[i].rport = rport; - set = 1; + if ((twait_block->vtag_block[i].v_tag == 0) && !set) { + sctp_set_vtag_block(twait_block->vtag_block + i, time, tag, lport, rport); + set = true; + continue; + } + if ((twait_block->vtag_block[i].v_tag != 0) && + (twait_block->vtag_block[i].tv_sec_at_expire < (uint32_t)now.tv_sec)) { + if (set) { + /* Audit expires this guy */ + sctp_set_vtag_block(twait_block->vtag_block + i, 0, 0, 0, 0); + } else { + /* Reuse it for the new tag */ + sctp_set_vtag_block(twait_block->vtag_block + i, time, tag, lport, rport); + set = true; } } } @@ -5392,17 +5368,11 @@ sctp_add_vtag_to_timewait(uint32_t tag, uint32_t time, uint16_t lport, uint16_t SCTP_MALLOC(twait_block, struct sctp_tagblock *, sizeof(struct sctp_tagblock), SCTP_M_TIMW); if (twait_block == NULL) { -#ifdef INVARIANTS - panic("Can not alloc tagblock"); -#endif return; } memset(twait_block, 0, sizeof(struct sctp_tagblock)); LIST_INSERT_HEAD(chain, twait_block, sctp_nxt_tagblock); - twait_block->vtag_block[0].tv_sec_at_expire = now.tv_sec + time; - twait_block->vtag_block[0].v_tag = tag; - twait_block->vtag_block[0].lport = lport; - twait_block->vtag_block[0].rport = rport; + sctp_set_vtag_block(twait_block->vtag_block, time, tag, lport, rport); } } @@ -5447,10 +5417,6 @@ sctp_clean_up_stream(struct sctp_tcb *stcb, struct sctp_readhead *rh) } } -#ifdef __Panda__ -void panda_wakeup_socket(struct socket *so); -#endif - /*- * Free the association after un-hashing the remote port. This * function ALWAYS returns holding NO LOCK on the stcb. It DOES @@ -5476,7 +5442,7 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre struct socket *so; /* first, lets purge the entry from the hash table. */ -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) sctp_lock_assert(SCTP_INP_SO(inp)); #endif @@ -5490,17 +5456,18 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre /* there is no asoc, really TSNH :-0 */ return (1); } + SCTP_TCB_SEND_LOCK(stcb); if (stcb->asoc.alternate) { sctp_free_remote_addr(stcb->asoc.alternate); stcb->asoc.alternate = NULL; } -#if !defined(__APPLE__) /* TEMP: moved to below */ - /* TEMP CODE */ +#if !(defined(__APPLE__) && !defined(__Userspace__)) + /* TEMP CODE */ if (stcb->freed_from_where == 0) { /* Only record the first place free happened from */ stcb->freed_from_where = from_location; } - /* TEMP CODE */ + /* TEMP CODE */ #endif asoc = &stcb->asoc; @@ -5526,6 +5493,7 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre /* nope, reader or writer in the way */ sctp_timer_start(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL); /* no asoc destroyed */ + SCTP_TCB_SEND_UNLOCK(stcb); SCTP_TCB_UNLOCK(stcb); #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, stcb, 8); @@ -5533,37 +5501,8 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre return (0); } } - /* now clean up any other timers */ - (void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer); - asoc->dack_timer.self = NULL; - (void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer); - /*- - * For stream reset we don't blast this unless - * it is a str-reset timer, it might be the - * free-asoc timer which we DON'T want to - * disturb. 
- */ - if (asoc->strreset_timer.type == SCTP_TIMER_TYPE_STRRESET) - asoc->strreset_timer.self = NULL; - (void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer); - asoc->asconf_timer.self = NULL; - (void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer); - asoc->autoclose_timer.self = NULL; - (void)SCTP_OS_TIMER_STOP(&asoc->shut_guard_timer.timer); - asoc->shut_guard_timer.self = NULL; - (void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer); - asoc->delayed_event_timer.self = NULL; - /* Mobility adaptation */ - (void)SCTP_OS_TIMER_STOP(&asoc->delete_prim_timer.timer); - asoc->delete_prim_timer.self = NULL; - TAILQ_FOREACH(net, &asoc->nets, sctp_next) { - (void)SCTP_OS_TIMER_STOP(&net->rxt_timer.timer); - net->rxt_timer.self = NULL; - (void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer); - net->pmtu_timer.self = NULL; - (void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer); - net->hb_timer.self = NULL; - } + /* Now clean up any other timers */ + sctp_stop_association_timers(stcb, false); /* Now the read queue needs to be cleaned up (only once) */ if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0) { SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_ABOUT_TO_BE_FREED); @@ -5616,7 +5555,6 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_IN_ACCEPT_QUEUE); sctp_timer_start(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL); } - SCTP_TCB_UNLOCK(stcb); if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) /* nothing around */ @@ -5626,6 +5564,8 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre sctp_sorwakeup(inp, so); sctp_sowwakeup(inp, so); } + SCTP_TCB_SEND_UNLOCK(stcb); + SCTP_TCB_UNLOCK(stcb); #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, stcb, 9); @@ -5654,10 +5594,12 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre if (from_inpcbfree == SCTP_NORMAL_PROC) { atomic_add_int(&stcb->asoc.refcnt, 1); + SCTP_TCB_SEND_UNLOCK(stcb); SCTP_TCB_UNLOCK(stcb); SCTP_INP_INFO_WLOCK(); SCTP_INP_WLOCK(inp); SCTP_TCB_LOCK(stcb); + SCTP_TCB_SEND_LOCK(stcb); } /* Double check the GONE flag */ if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || @@ -5681,7 +5623,7 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre SS_ISCONFIRMING | SS_ISCONNECTED); so->so_state |= SS_ISDISCONNECTED; -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) socantrcvmore(so); #else socantrcvmore_locked(so); @@ -5708,6 +5650,7 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre SCTP_INP_INFO_WUNLOCK(); SCTP_INP_WUNLOCK(inp); } + SCTP_TCB_SEND_UNLOCK(stcb); SCTP_TCB_UNLOCK(stcb); return (0); } @@ -5718,6 +5661,9 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre if (stcb->asoc.in_asocid_hash) { LIST_REMOVE(stcb, sctp_tcbasocidhash); } + if (inp->sctp_socket == NULL) { + stcb->sctp_socket = NULL; + } /* Now lets remove it from the list of ALL associations in the EP */ LIST_REMOVE(stcb, sctp_tcblist); if (from_inpcbfree == SCTP_NORMAL_PROC) { @@ -5726,32 +5672,18 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre } /* pull from vtag hash */ LIST_REMOVE(stcb, sctp_asocs); - sctp_add_vtag_to_timewait(asoc->my_vtag, SCTP_BASE_SYSCTL(sctp_vtag_time_wait), - inp->sctp_lport, stcb->rport); + sctp_add_vtag_to_timewait(asoc->my_vtag, inp->sctp_lport, stcb->rport); /* Now restop the timers to be sure * this is paranoia at is 
finest! */ - (void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer); - (void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer); - (void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer); - (void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer); - (void)SCTP_OS_TIMER_STOP(&asoc->shut_guard_timer.timer); - (void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer); - (void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer); - TAILQ_FOREACH(net, &asoc->nets, sctp_next) { - (void)SCTP_OS_TIMER_STOP(&net->rxt_timer.timer); - (void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer); - (void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer); - } + sctp_stop_association_timers(stcb, true); - asoc->strreset_timer.type = SCTP_TIMER_TYPE_NONE; /* * The chunk lists and such SHOULD be empty but we check them just * in case. */ /* anything on the wheel needs to be removed */ - SCTP_TCB_SEND_LOCK(stcb); for (i = 0; i < asoc->streamoutcnt; i++) { struct sctp_stream_out *outs; @@ -5782,7 +5714,6 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre sctp_free_a_strmoq(stcb, sp, SCTP_SO_LOCKED); } } - SCTP_TCB_SEND_UNLOCK(stcb); /*sa_ignore FREED_MEMORY*/ TAILQ_FOREACH_SAFE(strrst, &asoc->resetHead, next_resp, nstrrst) { TAILQ_REMOVE(&asoc->resetHead, strrst, next_resp); @@ -5984,6 +5915,7 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre /* Insert new items here :> */ /* Get rid of LOCK */ + SCTP_TCB_SEND_UNLOCK(stcb); SCTP_TCB_UNLOCK(stcb); SCTP_TCB_LOCK_DESTROY(stcb); SCTP_TCB_SEND_LOCK_DESTROY(stcb); @@ -5991,7 +5923,8 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre SCTP_INP_INFO_WUNLOCK(); SCTP_INP_RLOCK(inp); } -#if defined(__APPLE__) /* TEMP CODE */ +#if defined(__APPLE__) && !defined(__Userspace__) + /* TEMP CODE */ stcb->freed_from_where = from_location; #endif #ifdef SCTP_TRACK_FREED_ASOCS @@ -6024,16 +5957,12 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre SCTP_FREE_SHOULD_USE_GRACEFUL_CLOSE, SCTP_CALLED_DIRECTLY_NOCMPSET); SCTP_INP_DECR_REF(inp); - goto out_of; } else { /* The socket is still open. */ SCTP_INP_DECR_REF(inp); + SCTP_INP_RUNLOCK(inp); } } - if (from_inpcbfree == SCTP_NORMAL_PROC) { - SCTP_INP_RUNLOCK(inp); - } - out_of: /* destroyed the asoc */ #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, NULL, 11); @@ -6041,8 +5970,6 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre return (1); } - - /* * determine if a destination is "reachable" based upon the addresses bound * to the current endpoint (e.g. 
only v4 or v6 currently bound) @@ -6082,20 +6009,12 @@ sctp_destination_is_reachable(struct sctp_tcb *stcb, struct sockaddr *destaddr) switch (destaddr->sa_family) { #ifdef INET6 case AF_INET6: -#if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) || defined(__Userspace__)) - answer = inp->inp_vflag & INP_IPV6; -#else answer = inp->ip_inp.inp.inp_vflag & INP_IPV6; -#endif break; #endif #ifdef INET case AF_INET: -#if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) || defined(__Userspace__)) - answer = inp->inp_vflag & INP_IPV4; -#else answer = inp->ip_inp.inp.inp_vflag & INP_IPV4; -#endif break; #endif #if defined(__Userspace__) @@ -6120,11 +6039,7 @@ sctp_update_ep_vflag(struct sctp_inpcb *inp) struct sctp_laddr *laddr; /* first clear the flag */ -#if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) || defined(__Userspace__)) - inp->inp_vflag = 0; -#else inp->ip_inp.inp.inp_vflag = 0; -#endif /* set the flag based on addresses on the ep list */ LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { if (laddr->ifa == NULL) { @@ -6139,20 +6054,12 @@ sctp_update_ep_vflag(struct sctp_inpcb *inp) switch (laddr->ifa->address.sa.sa_family) { #ifdef INET6 case AF_INET6: -#if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) || defined(__Userspace__)) - inp->inp_vflag |= INP_IPV6; -#else inp->ip_inp.inp.inp_vflag |= INP_IPV6; -#endif break; #endif #ifdef INET case AF_INET: -#if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) || defined(__Userspace__)) - inp->inp_vflag |= INP_IPV4; -#else inp->ip_inp.inp.inp_vflag |= INP_IPV4; -#endif break; #endif #if defined(__Userspace__) @@ -6209,20 +6116,12 @@ sctp_add_local_addr_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa, uint32_t ac switch (ifa->address.sa.sa_family) { #ifdef INET6 case AF_INET6: -#if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) || defined(__Userspace__)) - inp->inp_vflag |= INP_IPV6; -#else inp->ip_inp.inp.inp_vflag |= INP_IPV6; -#endif break; #endif #ifdef INET case AF_INET: -#if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) || defined(__Userspace__)) - inp->inp_vflag |= INP_IPV4; -#else inp->ip_inp.inp.inp_vflag |= INP_IPV4; -#endif break; #endif #if defined(__Userspace__) @@ -6240,7 +6139,6 @@ sctp_add_local_addr_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa, uint32_t ac return; } - /* * select a new (hopefully reachable) destination net (should only be used * when we deleted an ep addr that is the only usable source address to reach @@ -6264,7 +6162,6 @@ sctp_select_primary_destination(struct sctp_tcb *stcb) /* I can't there from here! ...we're gonna die shortly... */ } - /* * Delete the address from the endpoint local address list. 
There is nothing * to be done if we are bound to all addresses @@ -6316,6 +6213,9 @@ sctp_del_local_addr_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa) TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { if (net->ro._s_addr == laddr->ifa) { /* Yep, purge src address selected */ +#if defined(__FreeBSD__) && !defined(__Userspace__) + RO_NHFREE(&net->ro); +#else sctp_rtentry_t *rt; /* delete this address if cached */ @@ -6324,6 +6224,7 @@ sctp_del_local_addr_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa) RTFREE(rt); net->ro.ro_rt = NULL; } +#endif sctp_free_ifa(net->ro._s_addr); net->ro._s_addr = NULL; net->src_addr_selected = 0; @@ -6419,20 +6320,17 @@ sctp_del_local_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa) return; } -#if defined(__FreeBSD__) -/* - * Temporarily remove for __APPLE__ until we use the Tiger equivalents - */ +#if defined(__FreeBSD__) && !defined(__Userspace__) /* sysctl */ static int sctp_max_number_of_assoc = SCTP_MAX_NUM_OF_ASOC; static int sctp_scale_up_for_address = SCTP_SCALE_FOR_ADDR; -#endif /* FreeBSD || APPLE */ +#endif - - -#if defined(__FreeBSD__) && defined(SCTP_MCORE_INPUT) && defined(SMP) +#if defined(__FreeBSD__) && !defined(__Userspace__) +#if defined(SCTP_MCORE_INPUT) && defined(SMP) struct sctp_mcore_ctrl *sctp_mcore_workers = NULL; int *sctp_cpuarry = NULL; + void sctp_queue_to_mcore(struct mbuf *m, int off, int cpu_to_use) { @@ -6440,6 +6338,7 @@ sctp_queue_to_mcore(struct mbuf *m, int off, int cpu_to_use) struct sctp_mcore_queue *qent; struct sctp_mcore_ctrl *wkq; int need_wake = 0; + if (sctp_mcore_workers == NULL) { /* Something went way bad during setup */ sctp_input_with_port(m, off, 0); @@ -6453,9 +6352,7 @@ sctp_queue_to_mcore(struct mbuf *m, int off, int cpu_to_use) sctp_input_with_port(m, off, 0); return; } -#if defined(__FreeBSD__) && __FreeBSD_version >= 801000 qent->vn = curvnet; -#endif qent->m = m; qent->off = off; qent->v6 = 0; @@ -6507,9 +6404,7 @@ sctp_mcore_thread(void *arg) if (qent) { TAILQ_REMOVE(&wkq->que, qent, next); SCTP_MCORE_QUNLOCK(wkq); -#if defined(__FreeBSD__) && __FreeBSD_version >= 801000 CURVNET_SET(qent->vn); -#endif m = qent->m; off = qent->off; v6 = qent->v6; @@ -6520,9 +6415,7 @@ sctp_mcore_thread(void *arg) SCTP_PRINTF("V6 not yet supported\n"); sctp_m_freem(m); } -#if defined(__FreeBSD__) && __FreeBSD_version >= 801000 CURVNET_RESTORE(); -#endif SCTP_MCORE_QLOCK(wkq); } wkq->running = 0; @@ -6576,30 +6469,21 @@ sctp_startup_mcore_threads(void) i++; } } - /* Now start them all */ CPU_FOREACH(cpu) { -#if __FreeBSD_version <= 701000 - (void)kthread_create(sctp_mcore_thread, - (void *)&sctp_mcore_workers[cpu], - &sctp_mcore_workers[cpu].thread_proc, - RFPROC, - SCTP_KTHREAD_PAGES, - SCTP_MCORE_NAME); - -#else (void)kproc_create(sctp_mcore_thread, (void *)&sctp_mcore_workers[cpu], &sctp_mcore_workers[cpu].thread_proc, - RFPROC, + 0, SCTP_KTHREAD_PAGES, SCTP_MCORE_NAME); -#endif - } } #endif -#if defined(__FreeBSD__) && __FreeBSD_cc_version >= 1400000 +#endif + +#if defined(__FreeBSD__) && !defined(__Userspace__) +#if defined(SCTP_NOT_YET) static struct mbuf * sctp_netisr_hdlr(struct mbuf *m, uintptr_t source) { @@ -6625,12 +6509,13 @@ sctp_netisr_hdlr(struct mbuf *m, uintptr_t source) tag = htonl(sh->v_tag); flowid = tag ^ ntohs(sh->dest_port) ^ ntohs(sh->src_port); m->m_pkthdr.flowid = flowid; -/* FIX ME */ + /* FIX ME */ m->m_flags |= M_FLOWID; return (m); } -#endif +#endif +#endif void #if defined(__Userspace__) sctp_pcb_init(int start_threads) @@ -6652,7 +6537,7 @@ sctp_pcb_init(void) 
SCTP_BASE_VAR(sctp_pcb_initialized) = 1; #if defined(SCTP_PROCESS_LEVEL_LOCKS) -#if !defined(__Userspace_os_Windows) +#if !defined(_WIN32) pthread_mutexattr_init(&SCTP_BASE_VAR(mtx_attr)); #ifdef INVARIANTS pthread_mutexattr_settype(&SCTP_BASE_VAR(mtx_attr), PTHREAD_MUTEX_ERRORCHECK); @@ -6660,7 +6545,7 @@ sctp_pcb_init(void) #endif #endif #if defined(SCTP_LOCAL_TRACE_BUF) -#if defined(__Windows__) +#if defined(_WIN32) && !defined(__Userspace__) if (SCTP_BASE_SYSCTL(sctp_log) != NULL) { memset(SCTP_BASE_SYSCTL(sctp_log), 0, sizeof(struct sctp_log)); } @@ -6668,16 +6553,24 @@ sctp_pcb_init(void) memset(&SCTP_BASE_SYSCTL(sctp_log), 0, sizeof(struct sctp_log)); #endif #endif -#if defined(__FreeBSD__) && defined(SMP) && defined(SCTP_USE_PERCPU_STAT) +#if defined(__FreeBSD__) && !defined(__Userspace__) +#if defined(SMP) && defined(SCTP_USE_PERCPU_STAT) SCTP_MALLOC(SCTP_BASE_STATS, struct sctpstat *, ((mp_maxid+1) * sizeof(struct sctpstat)), SCTP_M_MCORE); +#endif #endif (void)SCTP_GETTIME_TIMEVAL(&tv); -#if defined(__FreeBSD__) && defined(SMP) && defined(SCTP_USE_PERCPU_STAT) +#if defined(__FreeBSD__) && !defined(__Userspace__) +#if defined(SMP) && defined(SCTP_USE_PERCPU_STAT) memset(SCTP_BASE_STATS, 0, sizeof(struct sctpstat) * (mp_maxid+1)); SCTP_BASE_STATS[PCPU_GET(cpuid)].sctps_discontinuitytime.tv_sec = (uint32_t)tv.tv_sec; SCTP_BASE_STATS[PCPU_GET(cpuid)].sctps_discontinuitytime.tv_usec = (uint32_t)tv.tv_usec; +#else + memset(&SCTP_BASE_STATS, 0, sizeof(struct sctpstat)); + SCTP_BASE_STAT(sctps_discontinuitytime).tv_sec = (uint32_t)tv.tv_sec; + SCTP_BASE_STAT(sctps_discontinuitytime).tv_usec = (uint32_t)tv.tv_usec; +#endif #else memset(&SCTP_BASE_STATS, 0, sizeof(struct sctpstat)); SCTP_BASE_STAT(sctps_discontinuitytime).tv_sec = (uint32_t)tv.tv_sec; @@ -6685,7 +6578,7 @@ sctp_pcb_init(void) #endif /* init the empty list of (All) Endpoints */ LIST_INIT(&SCTP_BASE_INFO(listhead)); -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) LIST_INIT(&SCTP_BASE_INFO(inplisthead)); #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD) || defined(APPLE_LION) || defined(APPLE_MOUNTAINLION) SCTP_BASE_INFO(sctbinfo).listhead = &SCTP_BASE_INFO(inplisthead); @@ -6708,21 +6601,11 @@ sctp_pcb_init(void) #endif #endif - /* init the hash table of endpoints */ -#if defined(__FreeBSD__) -#if defined(__FreeBSD_cc_version) && __FreeBSD_cc_version >= 440000 +#if defined(__FreeBSD__) && !defined(__Userspace__) TUNABLE_INT_FETCH("net.inet.sctp.tcbhashsize", &SCTP_BASE_SYSCTL(sctp_hashtblsize)); TUNABLE_INT_FETCH("net.inet.sctp.pcbhashsize", &SCTP_BASE_SYSCTL(sctp_pcbtblsize)); TUNABLE_INT_FETCH("net.inet.sctp.chunkscale", &SCTP_BASE_SYSCTL(sctp_chunkscale)); -#else - TUNABLE_INT_FETCH("net.inet.sctp.tcbhashsize", SCTP_TCBHASHSIZE, - SCTP_BASE_SYSCTL(sctp_hashtblsize)); - TUNABLE_INT_FETCH("net.inet.sctp.pcbhashsize", SCTP_PCBHASHSIZE, - SCTP_BASE_SYSCTL(sctp_pcbtblsize)); - TUNABLE_INT_FETCH("net.inet.sctp.chunkscale", SCTP_CHUNKQUEUE_SCALE, - SCTP_BASE_SYSCTL(sctp_chunkscale)); -#endif #endif SCTP_BASE_INFO(sctp_asochash) = SCTP_HASH_INIT((SCTP_BASE_SYSCTL(sctp_hashtblsize) * 31), &SCTP_BASE_INFO(hashasocmark)); @@ -6731,8 +6614,6 @@ sctp_pcb_init(void) SCTP_BASE_INFO(sctp_tcpephash) = SCTP_HASH_INIT(SCTP_BASE_SYSCTL(sctp_hashtblsize), &SCTP_BASE_INFO(hashtcpmark)); SCTP_BASE_INFO(hashtblsize) = SCTP_BASE_SYSCTL(sctp_hashtblsize); - - SCTP_BASE_INFO(sctp_vrfhash) = SCTP_HASH_INIT(SCTP_SIZE_OF_VRF_HASH, &SCTP_BASE_INFO(hashvrfmark)); @@ -6777,7 +6658,6 @@ sctp_pcb_init(void) sizeof(struct 
sctp_asconf_ack), (sctp_max_number_of_assoc * SCTP_BASE_SYSCTL(sctp_chunkscale))); - /* Master Lock INIT for info structure */ SCTP_INP_INFO_LOCK_INIT(); SCTP_STATLOG_INIT_LOCK(); @@ -6817,7 +6697,7 @@ sctp_pcb_init(void) LIST_INIT(&SCTP_BASE_INFO(vtag_timewait)[i]); } #if defined(SCTP_PROCESS_LEVEL_LOCKS) -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) InitializeConditionVariable(&sctp_it_ctl.iterator_wakeup); #else (void)pthread_cond_init(&sctp_it_ctl.iterator_wakeup, NULL); @@ -6825,19 +6705,19 @@ sctp_pcb_init(void) #endif sctp_startup_iterator(); -#if defined(__FreeBSD__) && defined(SCTP_MCORE_INPUT) && defined(SMP) +#if defined(__FreeBSD__) && !defined(__Userspace__) +#if defined(SCTP_MCORE_INPUT) && defined(SMP) sctp_startup_mcore_threads(); +#endif #endif -#ifndef __Panda__ /* * INIT the default VRF which for BSD is the only one, other O/S's * may have more. But initially they must start with one and then * add the VRF's as addresses are added. */ sctp_init_vrf_list(SCTP_DEFAULT_VRF); -#endif -#if defined(__FreeBSD__) && __FreeBSD_cc_version >= 1400000 +#if defined(__FreeBSD__) && !defined(__Userspace__) && defined(SCTP_NOT_YET) if (ip_register_flow_handler(sctp_netisr_hdlr, IPPROTO_SCTP)) { SCTP_PRINTF("***SCTP- Error can't register netisr handler***\n"); } @@ -6878,14 +6758,14 @@ sctp_pcb_finish(void) return; } SCTP_BASE_VAR(sctp_pcb_initialized) = 0; -#if !defined(__FreeBSD__) +#if !(defined(__FreeBSD__) && !defined(__Userspace__)) /* Notify the iterator to exit. */ SCTP_IPI_ITERATOR_WQ_LOCK(); sctp_it_ctl.iterator_flags |= SCTP_ITERATOR_MUST_EXIT; sctp_wakeup_iterator(); SCTP_IPI_ITERATOR_WQ_UNLOCK(); #endif -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) #if !defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION) in_pcbinfo_detach(&SCTP_BASE_INFO(sctbinfo)); #endif @@ -6898,7 +6778,7 @@ sctp_pcb_finish(void) thread_deallocate(sctp_it_ctl.thread_proc); SCTP_IPI_ITERATOR_WQ_UNLOCK(); #endif -#if defined(__Windows__) +#if defined(_WIN32) && !defined(__Userspace__) if (sctp_it_ctl.iterator_thread_obj != NULL) { NTSTATUS status = STATUS_SUCCESS; @@ -6912,8 +6792,8 @@ sctp_pcb_finish(void) } #endif #if defined(__Userspace__) - if (sctp_it_ctl.thread_proc) { -#if defined(__Userspace_os_Windows) + if (SCTP_BASE_VAR(iterator_thread_started)) { +#if defined(_WIN32) WaitForSingleObject(sctp_it_ctl.thread_proc, INFINITE); CloseHandle(sctp_it_ctl.thread_proc); sctp_it_ctl.thread_proc = NULL; @@ -6924,7 +6804,7 @@ sctp_pcb_finish(void) } #endif #if defined(SCTP_PROCESS_LEVEL_LOCKS) -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) DeleteConditionVariable(&sctp_it_ctl.iterator_wakeup); #else pthread_cond_destroy(&sctp_it_ctl.iterator_wakeup); @@ -6936,17 +6816,17 @@ sctp_pcb_finish(void) * The only way FreeBSD reaches here is if we have VRF's * but we still add the ifdef to make it compile on old versions. */ -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) retry: #endif SCTP_IPI_ITERATOR_WQ_LOCK(); -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) /* * sctp_iterator_worker() might be working on an it entry without * holding the lock. We won't find it on the list either and * continue and free/destroy it. While holding the lock, spin, to * avoid the race condition as sctp_iterator_worker() will have to - * wait to re-aquire the lock. + * wait to re-acquire the lock. 
*/ if (sctp_it_ctl.iterator_running != 0 || sctp_it_ctl.cur_it != NULL) { SCTP_IPI_ITERATOR_WQ_UNLOCK(); @@ -6957,7 +6837,7 @@ retry: } #endif TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) { -#if defined(__FreeBSD__) && __FreeBSD_version >= 801000 +#if defined(__FreeBSD__) && !defined(__Userspace__) if (it->vn != curvnet) { continue; } @@ -6969,7 +6849,7 @@ retry: SCTP_FREE(it,SCTP_M_ITER); } SCTP_IPI_ITERATOR_WQ_UNLOCK(); -#if defined(__FreeBSD__) && __FreeBSD_version >= 801000 +#if defined(__FreeBSD__) && !defined(__Userspace__) SCTP_ITERATOR_LOCK(); if ((sctp_it_ctl.cur_it) && (sctp_it_ctl.cur_it->vn == curvnet)) { @@ -6977,7 +6857,7 @@ retry: } SCTP_ITERATOR_UNLOCK(); #endif -#if !defined(__FreeBSD__) +#if !(defined(__FreeBSD__) && !defined(__Userspace__)) SCTP_IPI_ITERATOR_WQ_DESTROY(); SCTP_ITERATOR_LOCK_DESTROY(); #endif @@ -6997,6 +6877,7 @@ retry: * free the vrf/ifn/ifa lists and hashes (be sure address monitor * is destroyed first). */ + SCTP_IPI_ADDR_WLOCK(); vrf_bucket = &SCTP_BASE_INFO(sctp_vrfhash)[(SCTP_DEFAULT_VRFID & SCTP_BASE_INFO(hashvrfmark))]; LIST_FOREACH_SAFE(vrf, vrf_bucket, next_vrf, nvrf) { LIST_FOREACH_SAFE(ifn, &vrf->ifnlist, next_ifn, nifn) { @@ -7016,6 +6897,7 @@ retry: LIST_REMOVE(vrf, next_vrf); SCTP_FREE(vrf, SCTP_M_VRF); } + SCTP_IPI_ADDR_WUNLOCK(); /* free the vrf hashes */ SCTP_HASH_FREE(SCTP_BASE_INFO(sctp_vrfhash), SCTP_BASE_INFO(hashvrfmark)); SCTP_HASH_FREE(SCTP_BASE_INFO(vrf_ifn_hash), SCTP_BASE_INFO(vrf_ifn_hashmark)); @@ -7038,14 +6920,14 @@ retry: } /* free the locks and mutexes */ -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_TIMERQ_LOCK_DESTROY(); #endif #ifdef SCTP_PACKET_LOGGING SCTP_IP_PKTLOG_DESTROY(); #endif SCTP_IPI_ADDR_DESTROY(); -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_IPI_COUNT_DESTROY(); #endif SCTP_STATLOG_DESTROY(); @@ -7053,7 +6935,7 @@ retry: SCTP_WQ_ADDR_DESTROY(); -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD) || defined(APPLE_LION) || defined(APPLE_MOUNTAINLION) lck_grp_attr_free(SCTP_BASE_INFO(sctbinfo).mtx_grp_attr); lck_grp_free(SCTP_BASE_INFO(sctbinfo).mtx_grp); @@ -7078,7 +6960,7 @@ retry: if (SCTP_BASE_INFO(sctp_tcpephash) != NULL) SCTP_HASH_FREE(SCTP_BASE_INFO(sctp_tcpephash), SCTP_BASE_INFO(hashtcpmark)); -#if defined(__Windows__) || defined(__FreeBSD__) || defined(__Userspace__) +#if defined(_WIN32) || defined(__FreeBSD__) || defined(__Userspace__) SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_ep)); SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_asoc)); SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_laddr)); @@ -7089,12 +6971,13 @@ retry: SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_asconf)); SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_asconf_ack)); #endif -#if defined(__FreeBSD__) && defined(SMP) && defined(SCTP_USE_PERCPU_STAT) +#if defined(__FreeBSD__) && !defined(__Userspace__) +#if defined(SMP) && defined(SCTP_USE_PERCPU_STAT) SCTP_FREE(SCTP_BASE_STATS, SCTP_M_MCORE); #endif +#endif } - int sctp_load_addresses_from_init(struct sctp_tcb *stcb, struct mbuf *m, int offset, int limit, @@ -7171,6 +7054,7 @@ sctp_load_addresses_from_init(struct sctp_tcb *stcb, struct mbuf *m, peer_supports_prsctp = 0; peer_supports_auth = 0; peer_supports_asconf = 0; + peer_supports_asconf_ack = 0; peer_supports_reconfig = 0; peer_supports_nrsack = 0; peer_supports_pktdrop = 0; @@ -7318,12 +7202,12 @@ sctp_load_addresses_from_init(struct sctp_tcb *stcb, struct mbuf *m, char 
msg[SCTP_DIAG_INFO_LEN]; /* in setup state we abort this guy */ - snprintf(msg, sizeof(msg), - "%s:%d at %s", __FILE__, __LINE__, __func__); + SCTP_SNPRINTF(msg, sizeof(msg), + "%s:%d at %s", __FILE__, __LINE__, __func__); op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), msg); sctp_abort_an_association(stcb_tmp->sctp_ep, - stcb_tmp, op_err, + stcb_tmp, op_err, false, SCTP_SO_NOT_LOCKED); goto add_it_now; } @@ -7412,12 +7296,12 @@ sctp_load_addresses_from_init(struct sctp_tcb *stcb, struct mbuf *m, char msg[SCTP_DIAG_INFO_LEN]; /* in setup state we abort this guy */ - snprintf(msg, sizeof(msg), - "%s:%d at %s", __FILE__, __LINE__, __func__); + SCTP_SNPRINTF(msg, sizeof(msg), + "%s:%d at %s", __FILE__, __LINE__, __func__); op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), msg); sctp_abort_an_association(stcb_tmp->sctp_ep, - stcb_tmp, op_err, + stcb_tmp, op_err, false, SCTP_SO_NOT_LOCKED); goto add_it_now6; } @@ -7557,7 +7441,6 @@ sctp_load_addresses_from_init(struct sctp_tcb *stcb, struct mbuf *m, default: /* one I have not learned yet */ break; - } } } else if (ptype == SCTP_RANDOM) { @@ -7640,7 +7523,6 @@ sctp_load_addresses_from_init(struct sctp_tcb *stcb, struct mbuf *m, saw_asconf = 1; if (chunks->chunk_types[i] == SCTP_ASCONF_ACK) saw_asconf_ack = 1; - } got_chklist = 1; } else if ((ptype == SCTP_HEARTBEAT_INFO) || @@ -7652,7 +7534,7 @@ sctp_load_addresses_from_init(struct sctp_tcb *stcb, struct mbuf *m, (ptype == SCTP_DEL_IP_ADDRESS) || (ptype == SCTP_ERROR_CAUSE_IND) || (ptype == SCTP_SUCCESS_REPORT)) { - /* don't care */ ; + /* don't care */ } else { if ((ptype & 0x8000) == 0x0000) { /* @@ -7672,7 +7554,7 @@ sctp_load_addresses_from_init(struct sctp_tcb *stcb, struct mbuf *m, break; } phdr = sctp_get_next_param(m, offset, ¶m_buf, - sizeof(param_buf)); + sizeof(param_buf)); } /* Now check to see if we need to purge any addresses */ TAILQ_FOREACH_SAFE(net, &stcb->asoc.nets, sctp_next, nnet) { @@ -7682,11 +7564,15 @@ sctp_load_addresses_from_init(struct sctp_tcb *stcb, struct mbuf *m, /* remove and free it */ stcb->asoc.numnets--; TAILQ_REMOVE(&stcb->asoc.nets, net, sctp_next); - sctp_free_remote_addr(net); + if (net == stcb->asoc.alternate) { + sctp_free_remote_addr(stcb->asoc.alternate); + stcb->asoc.alternate = NULL; + } if (net == stcb->asoc.primary_destination) { stcb->asoc.primary_destination = NULL; sctp_select_primary_destination(stcb); } + sctp_free_remote_addr(net); } } if ((stcb->asoc.ecn_supported == 1) && @@ -7812,24 +7698,15 @@ sctp_set_primary_addr(struct sctp_tcb *stcb, struct sockaddr *sa, } } -int +bool sctp_is_vtag_good(uint32_t tag, uint16_t lport, uint16_t rport, struct timeval *now) { - /* - * This function serves two purposes. It will see if a TAG can be - * re-used and return 1 for yes it is ok and 0 for don't use that - * tag. A secondary function it will do is purge out old tags that - * can be removed. - */ - struct sctpvtaghead *chain; - struct sctp_tagblock *twait_block; struct sctpasochead *head; struct sctp_tcb *stcb; - int i; - SCTP_INP_INFO_RLOCK(); - head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(tag, - SCTP_BASE_INFO(hashasocmark))]; + SCTP_INP_INFO_LOCK_ASSERT(); + + head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(tag, SCTP_BASE_INFO(hashasocmark))]; LIST_FOREACH(stcb, head, sctp_asocs) { /* We choose not to lock anything here. 
TCB's can't be * removed since we have the read lock, so they can't @@ -7848,40 +7725,11 @@ sctp_is_vtag_good(uint32_t tag, uint16_t lport, uint16_t rport, struct timeval * if (stcb->sctp_ep->sctp_lport != lport) { continue; } - /* Its a used tag set */ - SCTP_INP_INFO_RUNLOCK(); - return (0); + /* The tag is currently used, so don't use it. */ + return (false); } } - chain = &SCTP_BASE_INFO(vtag_timewait)[(tag % SCTP_STACK_VTAG_HASH_SIZE)]; - /* Now what about timed wait ? */ - LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) { - /* - * Block(s) are present, lets see if we have this tag in the - * list - */ - for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) { - if (twait_block->vtag_block[i].v_tag == 0) { - /* not used */ - continue; - } else if ((long)twait_block->vtag_block[i].tv_sec_at_expire < - now->tv_sec) { - /* Audit expires this guy */ - twait_block->vtag_block[i].tv_sec_at_expire = 0; - twait_block->vtag_block[i].v_tag = 0; - twait_block->vtag_block[i].lport = 0; - twait_block->vtag_block[i].rport = 0; - } else if ((twait_block->vtag_block[i].v_tag == tag) && - (twait_block->vtag_block[i].lport == lport) && - (twait_block->vtag_block[i].rport == rport)) { - /* Bad tag, sorry :< */ - SCTP_INP_INFO_RUNLOCK(); - return (0); - } - } - } - SCTP_INP_INFO_RUNLOCK(); - return (1); + return (!sctp_is_in_timewait(tag, lport, rport, (uint32_t)now->tv_sec)); } static void @@ -7913,7 +7761,7 @@ sctp_drain_mbufs(struct sctp_tcb *stcb) for (strmat = 0; strmat < asoc->streamincnt; strmat++) { TAILQ_FOREACH_SAFE(control, &asoc->strmin[strmat].inqueue, next_instrm, ncontrol) { #ifdef INVARIANTS - if (control->on_strm_q != SCTP_ON_ORDERED ) { + if (control->on_strm_q != SCTP_ON_ORDERED) { panic("Huh control: %p on_q: %d -- not ordered?", control, control->on_strm_q); } @@ -7973,7 +7821,7 @@ sctp_drain_mbufs(struct sctp_tcb *stcb) } TAILQ_FOREACH_SAFE(control, &asoc->strmin[strmat].uno_inqueue, next_instrm, ncontrol) { #ifdef INVARIANTS - if (control->on_strm_q != SCTP_ON_UNORDERED ) { + if (control->on_strm_q != SCTP_ON_UNORDERED) { panic("Huh control: %p on_q: %d -- not unordered?", control, control->on_strm_q); } @@ -8068,7 +7916,8 @@ sctp_drain_mbufs(struct sctp_tcb *stcb) * asoc->highest_tsn_inside_map? */ asoc->last_revoke_count = cnt; - (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer); + sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, + SCTP_FROM_SCTP_PCB + SCTP_LOC_11); /*sa_ignore NO_NULL_CHK*/ sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_DRAIN, SCTP_SO_NOT_LOCKED); @@ -8092,7 +7941,7 @@ sctp_drain() * is LOW on MBUF's and needs help. This is where reneging will * occur. We really hope this does NOT happen! 
*/ -#if defined(__FreeBSD__) && __FreeBSD_version >= 801000 +#if defined(__FreeBSD__) && !defined(__Userspace__) VNET_ITERATOR_DECL(vnet_iter); #else struct sctp_inpcb *inp; @@ -8103,7 +7952,7 @@ sctp_drain() return; } #endif -#if defined(__FreeBSD__) && __FreeBSD_version >= 801000 +#if defined(__FreeBSD__) && !defined(__Userspace__) VNET_LIST_RLOCK_NOSLEEP(); VNET_FOREACH(vnet_iter) { CURVNET_SET(vnet_iter); @@ -8111,7 +7960,7 @@ sctp_drain() struct sctp_tcb *stcb; #endif -#if defined(__FreeBSD__) && __FreeBSD_version >= 801000 +#if defined(__FreeBSD__) && !defined(__Userspace__) SCTP_STAT_INCR(sctps_protocol_drain_calls); if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) { #ifdef VIMAGE @@ -8134,7 +7983,7 @@ sctp_drain() SCTP_INP_RUNLOCK(inp); } SCTP_INP_INFO_RUNLOCK(); -#if defined(__FreeBSD__) && __FreeBSD_version >= 801000 +#if defined(__FreeBSD__) && !defined(__Userspace__) CURVNET_RESTORE(); } VNET_LIST_RUNLOCK_NOSLEEP(); @@ -8194,7 +8043,7 @@ sctp_initiate_iterator(inp_func inpf, it->asoc_state = asoc_state; it->function_inp_end = inpe; it->no_chunk_output = chunk_output_off; -#if defined(__FreeBSD__) && __FreeBSD_version >= 801000 +#if defined(__FreeBSD__) && !defined(__Userspace__) it->vn = curvnet; #endif if (s_inp) { @@ -8210,7 +8059,6 @@ sctp_initiate_iterator(inp_func inpf, } SCTP_INP_INFO_RUNLOCK(); it->iterator_flags = SCTP_ITERATOR_DO_ALL_INP; - } SCTP_IPI_ITERATOR_WQ_LOCK(); if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) { diff --git a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_pcb.h b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_pcb.h index 551bbfb8e..6ce03a875 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_pcb.h +++ b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_pcb.h @@ -32,9 +32,9 @@ * THE POSSIBILITY OF SUCH DAMAGE. 
*/ -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) #include -__FBSDID("$FreeBSD: head/sys/netinet/sctp_pcb.h 354018 2019-10-24 09:22:23Z tuexen $"); +__FBSDID("$FreeBSD$"); #endif #ifndef _NETINET_SCTP_PCB_H_ @@ -147,9 +147,8 @@ struct sctp_tagblock { struct sctp_timewait vtag_block[SCTP_NUMBER_IN_VTAG_BLOCK]; }; - struct sctp_epinfo { -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) #ifdef INET struct socket *udp4_tun_socket; #endif @@ -189,7 +188,7 @@ struct sctp_epinfo { struct sctppcbhead listhead; struct sctpladdr addr_wq; -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) struct inpcbhead inplisthead; struct inpcbinfo sctbinfo; #endif @@ -204,18 +203,10 @@ struct sctp_epinfo { sctp_zone_t ipi_zone_asconf; sctp_zone_t ipi_zone_asconf_ack; -#if defined(__FreeBSD__) && __FreeBSD_version >= 503000 -#if __FreeBSD_version <= 602000 - struct mtx ipi_ep_mtx; -#else +#if defined(__FreeBSD__) && !defined(__Userspace__) struct rwlock ipi_ep_mtx; -#endif struct mtx ipi_iterator_wq_mtx; -#if __FreeBSD_version <= 602000 - struct mtx ipi_addr_mtx; -#else struct rwlock ipi_addr_mtx; -#endif struct mtx ipi_pktlog_mtx; struct mtx wq_addr_mtx; #elif defined(SCTP_PROCESS_LEVEL_LOCKS) @@ -224,7 +215,7 @@ struct sctp_epinfo { userland_mutex_t ipi_count_mtx; userland_mutex_t ipi_pktlog_mtx; userland_mutex_t wq_addr_mtx; -#elif defined(__APPLE__) +#elif defined(__APPLE__) && !defined(__Userspace__) #ifdef _KERN_LOCKS_H_ lck_mtx_t *ipi_addr_mtx; lck_mtx_t *ipi_count_mtx; @@ -235,13 +226,12 @@ struct sctp_epinfo { void *ipi_count_mtx; void *logging_mtx; #endif /* _KERN_LOCKS_H_ */ -#elif defined(__Windows__) +#elif defined(_WIN32) && !defined(__Userspace__) struct rwlock ipi_ep_lock; struct rwlock ipi_addr_lock; struct spinlock ipi_pktlog_mtx; struct rwlock wq_addr_mtx; #elif defined(__Userspace__) - /* TODO decide on __Userspace__ locks */ #endif uint32_t ipi_count_ep; @@ -286,14 +276,17 @@ struct sctp_epinfo { #endif }; - struct sctp_base_info { /* All static structures that * anchor the system must be here. 
*/ struct sctp_epinfo sctppcbinfo; -#if defined(__FreeBSD__) && defined(SMP) && defined(SCTP_USE_PERCPU_STAT) +#if defined(__FreeBSD__) && !defined(__Userspace__) +#if defined(SMP) && defined(SCTP_USE_PERCPU_STAT) struct sctpstat *sctpstat; +#else + struct sctpstat sctpstat; +#endif #else struct sctpstat sctpstat; #endif @@ -305,17 +298,19 @@ struct sctp_base_info { int packet_log_end; uint8_t packet_log_buffer[SCTP_PACKET_LOG_SIZE]; #endif -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) eventhandler_tag eh_tag; #endif -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) int sctp_main_timer_ticks; #endif #if defined(__Userspace__) userland_mutex_t timer_mtx; userland_thread_t timer_thread; int timer_thread_should_exit; -#if !defined(__Userspace_os_Windows) + int iterator_thread_started; + int timer_thread_started; +#if !defined(_WIN32) pthread_mutexattr_t mtx_attr; #if defined(INET) || defined(INET6) int userspace_route; @@ -323,7 +318,7 @@ struct sctp_base_info { #endif #endif #ifdef INET -#if defined(__Userspace_os_Windows) && !defined(__MINGW32__) +#if defined(_WIN32) && !defined(__MINGW32__) SOCKET userspace_rawsctp; SOCKET userspace_udpsctp; #else @@ -334,7 +329,7 @@ struct sctp_base_info { userland_thread_t recvthreadudp; #endif #ifdef INET6 -#if defined(__Userspace_os_Windows) && !defined(__MINGW32__) +#if defined(_WIN32) && !defined(__MINGW32__) SOCKET userspace_rawsctp6; SOCKET userspace_udpsctp6; #else @@ -361,11 +356,11 @@ struct sctp_pcb { uint32_t secret_key[SCTP_HOW_MANY_SECRETS][SCTP_NUMBER_OF_SECRETS]; unsigned int size_of_a_cookie; - unsigned int sctp_timeoutticks[SCTP_NUM_TMRS]; - unsigned int sctp_minrto; - unsigned int sctp_maxrto; - unsigned int initial_rto; - int initial_init_rto_max; + uint32_t sctp_timeoutticks[SCTP_NUM_TMRS]; + uint32_t sctp_minrto; + uint32_t sctp_maxrto; + uint32_t initial_rto; + uint32_t initial_init_rto_max; unsigned int sctp_sack_freq; uint32_t sctp_sws_sender; @@ -408,7 +403,7 @@ struct sctp_pcb { uint32_t def_cookie_life; /* defaults to 0 */ - int auto_close_time; + uint32_t auto_close_time; uint32_t initial_sequence_debug; uint32_t adaptation_layer_indicator; uint8_t adaptation_layer_indicator_provided; @@ -443,7 +438,6 @@ struct sctp_pcbtsn_rlog { }; #define SCTP_READ_LOG_SIZE 135 /* we choose the number to make a pcb a page */ - struct sctp_inpcb { /*- * put an inpcb in front of it all, kind of a waste but we need to @@ -454,8 +448,7 @@ struct sctp_inpcb { char align[(sizeof(struct inpcb) + SCTP_ALIGNM1) & ~SCTP_ALIGNM1]; } ip_inp; - -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) /* leave some space in case i386 inpcb is bigger than ppc */ uint8_t padding[128]; #endif @@ -464,7 +457,7 @@ struct sctp_inpcb { struct sctp_readhead read_queue; LIST_ENTRY(sctp_inpcb) sctp_list; /* lists all endpoints */ - /* hash of all endpoints for model */ + /* hash of all endpoints for model */ LIST_ENTRY(sctp_inpcb) sctp_hash; /* count of local addresses bound, 0 if bound all */ int laddr_count; @@ -488,7 +481,6 @@ struct sctp_inpcb { #ifdef SCTP_TRACK_FREED_ASOCS struct sctpasochead sctp_asoc_free_list; #endif - struct sctp_iterator *inp_starting_point_for_iterator; uint32_t sctp_frag_point; uint32_t partial_delivery_point; uint32_t sctp_context; @@ -512,26 +504,9 @@ struct sctp_inpcb { * they are candidates with sctp_sendm for * de-supporting. 
*/ -#ifdef __Panda__ - pakhandle_type pak_to_read; - pakhandle_type pak_to_read_sendq; -#endif struct mbuf *pkt, *pkt_last; struct mbuf *control; -#if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) || defined(__Userspace__)) -#ifndef INP_IPV6 -#define INP_IPV6 0x1 -#endif -#ifndef INP_IPV4 -#define INP_IPV4 0x2 -#endif - uint8_t inp_vflag; - /* TODO __Userspace__ where is our inp_vlag going to be? */ - uint8_t inp_ip_ttl; - uint8_t inp_ip_tos; /* defined as macro in user_inpcb.h */ - uint8_t inp_ip_resv; -#endif -#if defined(__FreeBSD__) && __FreeBSD_version >= 503000 +#if defined(__FreeBSD__) && !defined(__Userspace__) struct mtx inp_mtx; struct mtx inp_create_mtx; struct mtx inp_rdata_mtx; @@ -541,7 +516,7 @@ struct sctp_inpcb { userland_mutex_t inp_create_mtx; userland_mutex_t inp_rdata_mtx; int32_t refcount; -#elif defined(__APPLE__) +#elif defined(__APPLE__) && !defined(__Userspace__) #if defined(SCTP_APPLE_RWLOCK) lck_rw_t *inp_mtx; #else @@ -549,16 +524,15 @@ struct sctp_inpcb { #endif lck_mtx_t *inp_create_mtx; lck_mtx_t *inp_rdata_mtx; -#elif defined(__Windows__) +#elif defined(_WIN32) && !defined(__Userspace__) struct rwlock inp_lock; struct spinlock inp_create_lock; struct spinlock inp_rdata_lock; int32_t refcount; #elif defined(__Userspace__) - /* TODO decide on __Userspace__ locks */ int32_t refcount; #endif -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) int32_t refcount; uint32_t lock_caller1; @@ -602,7 +576,7 @@ struct sctp_inpcb { int (*recv_callback)(struct socket *, union sctp_sockstore, void *, size_t, struct sctp_rcvinfo, int, void *); uint32_t send_sb_threshold; - int (*send_callback)(struct socket *, uint32_t); + int (*send_callback)(struct socket *, uint32_t, void *); #endif }; @@ -610,8 +584,9 @@ struct sctp_inpcb { int register_recv_cb (struct socket *, int (*)(struct socket *, union sctp_sockstore, void *, size_t, struct sctp_rcvinfo, int, void *)); -int register_send_cb (struct socket *, uint32_t, int (*)(struct socket *, uint32_t)); +int register_send_cb (struct socket *, uint32_t, int (*)(struct socket *, uint32_t, void *)); int register_ulp_info (struct socket *, void *); +int retrieve_ulp_info (struct socket *, void **); #endif struct sctp_tcb { @@ -639,34 +614,30 @@ struct sctp_tcb { int freed_from_where; uint16_t rport; /* remote port in network format */ uint16_t resv; -#if defined(__FreeBSD__) && __FreeBSD_version >= 503000 +#if defined(__FreeBSD__) && !defined(__Userspace__) struct mtx tcb_mtx; struct mtx tcb_send_mtx; #elif defined(SCTP_PROCESS_LEVEL_LOCKS) userland_mutex_t tcb_mtx; userland_mutex_t tcb_send_mtx; -#elif defined(__APPLE__) +#elif defined(__APPLE__) && !defined(__Userspace__) lck_mtx_t* tcb_mtx; lck_mtx_t* tcb_send_mtx; -#elif defined(__Windows__) +#elif defined(_WIN32) && !defined(__Userspace__) struct spinlock tcb_lock; struct spinlock tcb_send_lock; #elif defined(__Userspace__) - /* TODO decide on __Userspace__ locks */ #endif -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) uint32_t caller1; uint32_t caller2; uint32_t caller3; #endif }; - -#if defined(__FreeBSD__) && __FreeBSD_version >= 503000 - +#if defined(__FreeBSD__) && !defined(__Userspace__) #include - -#elif defined(__APPLE__) +#elif defined(__APPLE__) && !defined(__Userspace__) /* * Apple MacOS X 10.4 "Tiger" */ @@ -677,7 +648,7 @@ struct sctp_tcb { #include -#elif defined(__Windows__) +#elif defined(_WIN32) && !defined(__Userspace__) #include @@ -692,15 +663,14 @@ struct sctp_tcb { #include #endif 
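A note on the userspace callback API visible in the sctp_inpcb changes just above: send_callback and register_send_cb() now take a third void * argument, and retrieve_ulp_info() is added next to register_ulp_info(). Below is a minimal sketch of a callback matching the new three-parameter signature; the assumption that the third parameter carries the pointer stored via register_ulp_info(), as well as the callback name and body, are illustrative and not taken from this patch.

/* Illustrative only, not part of the patch: a send-space callback with the
 * new signature int (*)(struct socket *, uint32_t, void *).
 * Assumes the declarations from netinet/sctp_pcb.h above are in scope. */
static int
example_send_space_cb(struct socket *so, uint32_t sb_free, void *ulp_info)
{
	(void)so;
	(void)ulp_info;	/* assumed to be the pointer registered via register_ulp_info() */
	/* The exact return-value contract is not defined by this diff, so treat
	 * this as a placeholder: nonzero once send buffer space is available. */
	return (sb_free > 0);
}
/* Hypothetical registration: register_send_cb(so, send_sb_threshold, example_send_space_cb); */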
-/* TODO where to put non-_KERNEL things for __Userspace__? */ #if defined(_KERNEL) || defined(__Userspace__) /* Attention Julian, this is the extern that * goes with the base info. sctp_pcb.c has * the real definition. */ -#if defined(__FreeBSD__) && __FreeBSD_version >= 801000 -VNET_DECLARE(struct sctp_base_info, system_base_info) ; +#if defined(__FreeBSD__) && !defined(__Userspace__) +VNET_DECLARE(struct sctp_base_info, system_base_info); #else extern struct sctp_base_info system_base_info; #endif @@ -742,26 +712,35 @@ void sctp_update_ifn_mtu(uint32_t ifn_index, uint32_t mtu); void sctp_free_ifn(struct sctp_ifn *sctp_ifnp); void sctp_free_ifa(struct sctp_ifa *sctp_ifap); - void sctp_del_addr_from_vrf(uint32_t vrfid, struct sockaddr *addr, uint32_t ifn_index, const char *if_name); - - struct sctp_nets *sctp_findnet(struct sctp_tcb *, struct sockaddr *); struct sctp_inpcb *sctp_pcb_findep(struct sockaddr *, int, int, uint32_t); -#if defined(__FreeBSD__) && __FreeBSD_version >= 500000 -int sctp_inpcb_bind(struct socket *, struct sockaddr *, - struct sctp_ifa *,struct thread *); -#elif defined(__Windows__) -int sctp_inpcb_bind(struct socket *, struct sockaddr *, - struct sctp_ifa *,PKTHREAD); +#if defined(__FreeBSD__) && !defined(__Userspace__) +int +sctp_inpcb_bind(struct socket *, struct sockaddr *, + struct sctp_ifa *, struct thread *); +int +sctp_inpcb_bind_locked(struct sctp_inpcb *, struct sockaddr *, + struct sctp_ifa *, struct thread *); +#elif defined(_WIN32) && !defined(__Userspace__) +int +sctp_inpcb_bind(struct socket *, struct sockaddr *, + struct sctp_ifa *, PKTHREAD); +int +sctp_inpcb_bind_locked(struct sctp_inpcb *, struct sockaddr *, + struct sctp_ifa *, PKTHREAD); #else /* struct proc is a dummy for __Userspace__ */ -int sctp_inpcb_bind(struct socket *, struct sockaddr *, - struct sctp_ifa *, struct proc *); +int +sctp_inpcb_bind(struct socket *, struct sockaddr *, + struct sctp_ifa *, struct proc *); +int +sctp_inpcb_bind_locked(struct sctp_inpcb *, struct sockaddr *, + struct sctp_ifa *, struct proc *); #endif struct sctp_tcb * @@ -809,33 +788,34 @@ void sctp_inpcb_free(struct sctp_inpcb *, int, int); #define SCTP_DONT_INITIALIZE_AUTH_PARAMS 0 #define SCTP_INITIALIZE_AUTH_PARAMS 1 -#if defined(__FreeBSD__) && __FreeBSD_version >= 500000 +#if defined(__FreeBSD__) && !defined(__Userspace__) struct sctp_tcb * sctp_aloc_assoc(struct sctp_inpcb *, struct sockaddr *, - int *, uint32_t, uint32_t, uint16_t, uint16_t, struct thread *, - int); -#elif defined(__Windows__) + int *, uint32_t, uint32_t, uint32_t, uint16_t, uint16_t, + struct thread *, int); struct sctp_tcb * -sctp_aloc_assoc(struct sctp_inpcb *, struct sockaddr *, - int *, uint32_t, uint32_t, uint16_t, uint16_t, PKTHREAD, int); +sctp_aloc_assoc_connected(struct sctp_inpcb *, struct sockaddr *, + int *, uint32_t, uint32_t, uint32_t, uint16_t, uint16_t, + struct thread *, int); +#elif defined(_WIN32) && !defined(__Userspace__) +struct sctp_tcb * +sctp_aloc_assoc(struct sctp_inpcb *, struct sockaddr *, int *, uint32_t, + uint32_t, uint32_t, uint16_t, uint16_t, PKTHREAD, int); +struct sctp_tcb * +sctp_aloc_assoc_connected(struct sctp_inpcb *, struct sockaddr *, int *, uint32_t, + uint32_t, uint32_t, uint16_t, uint16_t, PKTHREAD, int); #else /* proc will be NULL for __Userspace__ */ struct sctp_tcb * -sctp_aloc_assoc(struct sctp_inpcb *, struct sockaddr *, - int *, uint32_t, uint32_t, uint16_t, uint16_t, struct proc *, - int); +sctp_aloc_assoc(struct sctp_inpcb *, struct sockaddr *, int *, uint32_t, + uint32_t, uint32_t, 
uint16_t, uint16_t, struct proc *, int); +struct sctp_tcb * +sctp_aloc_assoc_connected(struct sctp_inpcb *, struct sockaddr *, int *, uint32_t, + uint32_t, uint32_t, uint16_t, uint16_t, struct proc *, int); #endif int sctp_free_assoc(struct sctp_inpcb *, struct sctp_tcb *, int, int); - -void sctp_delete_from_timewait(uint32_t, uint16_t, uint16_t); - -int sctp_is_in_timewait(uint32_t tag, uint16_t lport, uint16_t rport); - -void -sctp_add_vtag_to_timewait(uint32_t tag, uint32_t time, uint16_t lport, uint16_t rport); - void sctp_add_local_addr_ep(struct sctp_inpcb *, struct sctp_ifa *, uint32_t); void sctp_del_local_addr_ep(struct sctp_inpcb *, struct sctp_ifa *); @@ -865,7 +845,8 @@ int sctp_set_primary_addr(struct sctp_tcb *, struct sockaddr *, struct sctp_nets *); -int sctp_is_vtag_good(uint32_t, uint16_t lport, uint16_t rport, struct timeval *); +bool +sctp_is_vtag_good(uint32_t, uint16_t lport, uint16_t rport, struct timeval *); /* void sctp_drain(void); */ @@ -889,10 +870,12 @@ sctp_initiate_iterator(inp_func inpf, end_func ef, struct sctp_inpcb *, uint8_t co_off); -#if defined(__FreeBSD__) && defined(SCTP_MCORE_INPUT) && defined(SMP) +#if defined(__FreeBSD__) && !defined(__Userspace__) +#if defined(SCTP_MCORE_INPUT) && defined(SMP) void sctp_queue_to_mcore(struct mbuf *m, int off, int cpu_to_use); +#endif #endif #endif /* _KERNEL */ diff --git a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_peeloff.c b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_peeloff.c index 8e81065a4..548c3d82b 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_peeloff.c +++ b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_peeloff.c @@ -32,9 +32,9 @@ * THE POSSIBILITY OF SUCH DAMAGE. */ -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) #include -__FBSDID("$FreeBSD: head/sys/netinet/sctp_peeloff.c 337708 2018-08-13 13:58:45Z tuexen $"); +__FBSDID("$FreeBSD$"); #endif #include @@ -49,10 +49,6 @@ __FBSDID("$FreeBSD: head/sys/netinet/sctp_peeloff.c 337708 2018-08-13 13:58:45Z #include #include -#if defined(__APPLE__) -#define APPLE_FILE_NO 5 -#endif - int sctp_can_peel_off(struct socket *head, sctp_assoc_t assoc_id) { @@ -138,7 +134,6 @@ sctp_do_peeloff(struct socket *head, struct socket *so, sctp_assoc_t assoc_id) n_inp->sctp_context = inp->sctp_context; n_inp->max_cwnd = inp->max_cwnd; n_inp->local_strreset_support = inp->local_strreset_support; - n_inp->inp_starting_point_for_iterator = NULL; /* copy in the authentication parameters from the original endpoint */ if (n_inp->sctp_ep.local_hmacs) sctp_free_hmaclist(n_inp->sctp_ep.local_hmacs); @@ -164,7 +159,7 @@ sctp_do_peeloff(struct socket *head, struct socket *so, sctp_assoc_t assoc_id) atomic_add_int(&stcb->asoc.refcnt, 1); SCTP_TCB_UNLOCK(stcb); -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) sctp_pull_off_control_to_new_inp(inp, n_inp, stcb, SBL_WAIT); #else sctp_pull_off_control_to_new_inp(inp, n_inp, stcb, M_WAITOK); @@ -178,14 +173,6 @@ sctp_do_peeloff(struct socket *head, struct socket *so, sctp_assoc_t assoc_id) struct socket * sctp_get_peeloff(struct socket *head, sctp_assoc_t assoc_id, int *error) { -#if defined(__Userspace__) - /* if __Userspace__ chooses to originally not support peeloff, put it here... 
*/ -#endif -#if defined(__Panda__) - SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_PEELOFF, EINVAL); - *error = EINVAL; - return (NULL); -#else struct socket *newso; struct sctp_inpcb *inp, *n_inp; struct sctp_tcb *stcb; @@ -205,18 +192,15 @@ sctp_get_peeloff(struct socket *head, sctp_assoc_t assoc_id, int *error) } atomic_add_int(&stcb->asoc.refcnt, 1); SCTP_TCB_UNLOCK(stcb); -#if defined(__FreeBSD__) && __FreeBSD_version >= 801000 +#if defined(__FreeBSD__) && !defined(__Userspace__) CURVNET_SET(head->so_vnet); #endif newso = sonewconn(head, SS_ISCONNECTED -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) , NULL -#elif defined(__Panda__) - /* place this socket in the assoc's vrf id */ - , NULL, stcb->asoc.vrf_id #endif ); -#if defined(__FreeBSD__) && __FreeBSD_version >= 801000 +#if defined(__FreeBSD__) && !defined(__Userspace__) CURVNET_RESTORE(); #endif if (newso == NULL) { @@ -227,7 +211,7 @@ sctp_get_peeloff(struct socket *head, sctp_assoc_t assoc_id, int *error) return (NULL); } -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) else { SCTP_SOCKET_LOCK(newso, 1); } @@ -288,7 +272,6 @@ sctp_get_peeloff(struct socket *head, sctp_assoc_t assoc_id, int *error) SOCK_UNLOCK(newso); /* We remove it right away */ -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) || defined(__Userspace__) #ifdef SCTP_LOCK_LOGGING if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) { sctp_log_lock(inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_SOCK); @@ -297,16 +280,6 @@ sctp_get_peeloff(struct socket *head, sctp_assoc_t assoc_id, int *error) TAILQ_REMOVE(&head->so_comp, newso, so_list); head->so_qlen--; SOCK_UNLOCK(head); -#else - newso = TAILQ_FIRST(&head->so_q); - if (soqremque(newso, 1) == 0) { - SCTP_PRINTF("soremque failed, peeloff-fails (invarients would panic)\n"); - SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PEELOFF, ENOTCONN); - *error = ENOTCONN; - return (NULL); - - } -#endif /* * Now we must move it from one hash table to another and get the * stcb in the right place. @@ -318,13 +291,12 @@ sctp_get_peeloff(struct socket *head, sctp_assoc_t assoc_id, int *error) * And now the final hack. We move data in the pending side i.e. * head to the new socket buffer. Let the GRUBBING begin :-0 */ -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) sctp_pull_off_control_to_new_inp(inp, n_inp, stcb, SBL_WAIT); #else sctp_pull_off_control_to_new_inp(inp, n_inp, stcb, M_WAITOK); #endif atomic_subtract_int(&stcb->asoc.refcnt, 1); return (newso); -#endif } #endif diff --git a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_peeloff.h b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_peeloff.h index adefcded1..7e1c5ecee 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_peeloff.h +++ b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_peeloff.h @@ -32,7 +32,7 @@ * THE POSSIBILITY OF SUCH DAMAGE. 
*/ -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) #include __FBSDID("$FreeBSD: head/sys/netinet/sctp_peeloff.h 309607 2016-12-06 10:21:25Z tuexen $"); #endif @@ -42,13 +42,13 @@ __FBSDID("$FreeBSD: head/sys/netinet/sctp_peeloff.h 309607 2016-12-06 10:21:25Z #if defined(HAVE_SCTP_PEELOFF_SOCKOPT) /* socket option peeloff */ struct sctp_peeloff_opt { -#if !defined(__Windows__) +#if !(defined(_WIN32) && !defined(__Userspace__)) int s; #else HANDLE s; #endif sctp_assoc_t assoc_id; -#if !defined(__Windows__) +#if !(defined(_WIN32) && !defined(__Userspace__)) int new_sd; #else HANDLE new_sd; diff --git a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_process_lock.h b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_process_lock.h index ca6692120..dfcad8207 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_process_lock.h +++ b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_process_lock.h @@ -53,13 +53,16 @@ * per socket level locking */ -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) /* Lock for INFO stuff */ #define SCTP_INP_INFO_LOCK_INIT() #define SCTP_INP_INFO_RLOCK() #define SCTP_INP_INFO_RUNLOCK() #define SCTP_INP_INFO_WLOCK() #define SCTP_INP_INFO_WUNLOCK() +#define SCTP_INP_INFO_LOCK_ASSERT() +#define SCTP_INP_INFO_RLOCK_ASSERT() +#define SCTP_INP_INFO_WLOCK_ASSERT() #define SCTP_INP_INFO_LOCK_DESTROY() #define SCTP_IPI_COUNT_INIT() #define SCTP_IPI_COUNT_DESTROY() @@ -69,6 +72,9 @@ #define SCTP_INP_INFO_RUNLOCK() #define SCTP_INP_INFO_WLOCK() #define SCTP_INP_INFO_WUNLOCK() +#define SCTP_INP_INFO_LOCK_ASSERT() +#define SCTP_INP_INFO_RLOCK_ASSERT() +#define SCTP_INP_INFO_WLOCK_ASSERT() #define SCTP_INP_INFO_LOCK_DESTROY() #define SCTP_IPI_COUNT_INIT() #define SCTP_IPI_COUNT_DESTROY() @@ -86,7 +92,9 @@ #define SCTP_INP_RLOCK(_inp) #define SCTP_INP_RUNLOCK(_inp) #define SCTP_INP_WLOCK(_inp) -#define SCTP_INP_WUNLOCK(_inep) +#define SCTP_INP_WUNLOCK(_inp) +#define SCTP_INP_RLOCK_ASSERT(_inp) +#define SCTP_INP_WLOCK_ASSERT(_inp) #define SCTP_INP_INCR_REF(_inp) #define SCTP_INP_DECR_REF(_inp) @@ -115,7 +123,7 @@ */ #define SCTP_IPI_COUNT_INIT() -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) #define SCTP_WQ_ADDR_INIT() \ InitializeCriticalSection(&SCTP_BASE_INFO(wq_addr_mtx)) #define SCTP_WQ_ADDR_DESTROY() \ @@ -124,7 +132,7 @@ EnterCriticalSection(&SCTP_BASE_INFO(wq_addr_mtx)) #define SCTP_WQ_ADDR_UNLOCK() \ LeaveCriticalSection(&SCTP_BASE_INFO(wq_addr_mtx)) - +#define SCTP_WQ_ADDR_LOCK_ASSERT() #define SCTP_INP_INFO_LOCK_INIT() \ InitializeCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx)) @@ -140,6 +148,9 @@ LeaveCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx)) #define SCTP_INP_INFO_WUNLOCK() \ LeaveCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx)) +#define SCTP_INP_INFO_LOCK_ASSERT() +#define SCTP_INP_INFO_RLOCK_ASSERT() +#define SCTP_INP_INFO_WLOCK_ASSERT() #define SCTP_IP_PKTLOG_INIT() \ InitializeCriticalSection(&SCTP_BASE_INFO(ipi_pktlog_mtx)) @@ -185,6 +196,8 @@ #define SCTP_INP_WLOCK(_inp) \ EnterCriticalSection(&(_inp)->inp_mtx) #endif +#define SCTP_INP_RLOCK_ASSERT(_tcb) +#define SCTP_INP_WLOCK_ASSERT(_tcb) #define SCTP_TCB_SEND_LOCK_INIT(_tcb) \ InitializeCriticalSection(&(_tcb)->tcb_send_mtx) @@ -263,6 +276,8 @@ #define SCTP_WQ_ADDR_UNLOCK() \ (void)pthread_mutex_unlock(&SCTP_BASE_INFO(wq_addr_mtx)) #endif +#define SCTP_WQ_ADDR_LOCK_ASSERT() \ + KASSERT(pthread_mutex_trylock(&SCTP_BASE_INFO(wq_addr_mtx)) == EBUSY, ("%s: wq_addr_mtx not locked", __func__)) #define SCTP_INP_INFO_LOCK_INIT() \ 
(void)pthread_mutex_init(&SCTP_BASE_INFO(ipi_ep_mtx), &SCTP_BASE_VAR(mtx_attr)) @@ -287,6 +302,12 @@ #define SCTP_INP_INFO_WUNLOCK() \ (void)pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_ep_mtx)) #endif +#define SCTP_INP_INFO_LOCK_ASSERT() \ + KASSERT(pthread_mutex_trylock(&SCTP_BASE_INFO(ipi_ep_mtx)) == EBUSY, ("%s: ipi_ep_mtx not locked", __func__)) +#define SCTP_INP_INFO_RLOCK_ASSERT() \ + KASSERT(pthread_mutex_trylock(&SCTP_BASE_INFO(ipi_ep_mtx)) == EBUSY, ("%s: ipi_ep_mtx not locked", __func__)) +#define SCTP_INP_INFO_WLOCK_ASSERT() \ + KASSERT(pthread_mutex_trylock(&SCTP_BASE_INFO(ipi_ep_mtx)) == EBUSY, ("%s: ipi_ep_mtx not locked", __func__)) #define SCTP_INP_INFO_TRYLOCK() \ (!(pthread_mutex_trylock(&SCTP_BASE_INFO(ipi_ep_mtx)))) @@ -377,6 +398,10 @@ #define SCTP_INP_WUNLOCK(_inp) \ (void)pthread_mutex_unlock(&(_inp)->inp_mtx) #endif +#define SCTP_INP_RLOCK_ASSERT(_inp) \ + KASSERT(pthread_mutex_trylock(&(_inp)->inp_mtx) == EBUSY, ("%s: inp_mtx not locked", __func__)) +#define SCTP_INP_WLOCK_ASSERT(_inp) \ + KASSERT(pthread_mutex_trylock(&(_inp)->inp_mtx) == EBUSY, ("%s: inp_mtx not locked", __func__)) #define SCTP_INP_INCR_REF(_inp) atomic_add_int(&((_inp)->refcount), 1) #define SCTP_INP_DECR_REF(_inp) atomic_add_int(&((_inp)->refcount), -1) @@ -484,7 +509,7 @@ /* socket locks */ -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) #define SOCKBUF_LOCK_ASSERT(_so_buf) #define SOCKBUF_LOCK(_so_buf) \ EnterCriticalSection(&(_so_buf)->sb_mtx) @@ -519,7 +544,7 @@ #define SCTP_STATLOG_UNLOCK() #define SCTP_STATLOG_DESTROY() -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) /* address list locks */ #define SCTP_IPI_ADDR_INIT() \ InitializeCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx)) @@ -533,6 +558,8 @@ EnterCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx)) #define SCTP_IPI_ADDR_WUNLOCK() \ LeaveCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx)) +#define SCTP_IPI_ADDR_LOCK_ASSERT() +#define SCTP_IPI_ADDR_WLOCK_ASSERT() /* iterator locks */ @@ -554,7 +581,7 @@ #define SCTP_IPI_ITERATOR_WQ_UNLOCK() \ LeaveCriticalSection(&sctp_it_ctl.ipi_iterator_wq_mtx) -#else /* end of __Userspace_os_Windows */ +#else /* address list locks */ #define SCTP_IPI_ADDR_INIT() \ (void)pthread_mutex_init(&SCTP_BASE_INFO(ipi_addr_mtx), &SCTP_BASE_VAR(mtx_attr)) @@ -569,6 +596,10 @@ KASSERT(pthread_mutex_lock(&SCTP_BASE_INFO(ipi_addr_mtx)) == 0, ("%s: ipi_addr_mtx already locked", __func__)) #define SCTP_IPI_ADDR_WUNLOCK() \ KASSERT(pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_addr_mtx)) == 0, ("%s: ipi_addr_mtx not locked", __func__)) +#define SCTP_IPI_ADDR_LOCK_ASSERT() \ + KASSERT(pthread_mutex_trylock(&SCTP_BASE_INFO(ipi_addr_mtx)) == EBUSY, ("%s: ipi_addr_mtx not locked", __func__)) +#define SCTP_IPI_ADDR_WLOCK_ASSERT() \ + KASSERT(pthread_mutex_trylock(&SCTP_BASE_INFO(ipi_addr_mtx)) == EBUSY, ("%s: ipi_addr_mtx not locked", __func__)) #else #define SCTP_IPI_ADDR_RLOCK() \ (void)pthread_mutex_lock(&SCTP_BASE_INFO(ipi_addr_mtx)) @@ -578,6 +609,8 @@ (void)pthread_mutex_lock(&SCTP_BASE_INFO(ipi_addr_mtx)) #define SCTP_IPI_ADDR_WUNLOCK() \ (void)pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_addr_mtx)) +#define SCTP_IPI_ADDR_LOCK_ASSERT() +#define SCTP_IPI_ADDR_WLOCK_ASSERT() #endif /* iterator locks */ diff --git a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_sha1.c b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_sha1.c index aba616069..8472c3a1c 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_sha1.c +++ b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_sha1.c @@ -80,13 +80,31 
@@ sctp_sha1_final(unsigned char *digest, struct sctp_sha1_context *ctx) { SHA1_Final(digest, &ctx->sha_ctx); } +#elif defined(SCTP_USE_MBEDTLS_SHA1) +void +sctp_sha1_init(struct sctp_sha1_context *ctx) +{ + mbedtls_sha1_init(&ctx->sha1_ctx); + mbedtls_sha1_starts_ret(&ctx->sha1_ctx); +} +void +sctp_sha1_update(struct sctp_sha1_context *ctx, const unsigned char *ptr, unsigned int siz) +{ + mbedtls_sha1_update_ret(&ctx->sha1_ctx, ptr, siz); +} + +void +sctp_sha1_final(unsigned char *digest, struct sctp_sha1_context *ctx) +{ + mbedtls_sha1_finish_ret(&ctx->sha1_ctx, digest); +} #else #include -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) && defined(__Userspace__) #include -#elif !defined(__Windows__) +#elif !(defined(_WIN32) && !defined(__Userspace__)) #include #endif diff --git a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_sha1.h b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_sha1.h index 6d6a05c10..9ff4ff7bd 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_sha1.h +++ b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_sha1.h @@ -32,7 +32,7 @@ * THE POSSIBILITY OF SUCH DAMAGE. */ -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) #include __FBSDID("$FreeBSD$"); #endif @@ -43,18 +43,11 @@ __FBSDID("$FreeBSD$"); #include #if defined(SCTP_USE_NSS_SHA1) -#if defined(__Userspace_os_Darwin) -/* The NSS sources require __APPLE__ to be defined. - * XXX: Remove this ugly hack once the platform defines have been cleaned up. - */ -#define __APPLE__ -#endif #include -#if defined(__Userspace_os_Darwin) -#undef __APPLE__ -#endif #elif defined(SCTP_USE_OPENSSL_SHA1) #include +#elif defined(SCTP_USE_MBEDTLS_SHA1) +#include #endif struct sctp_sha1_context { @@ -62,6 +55,8 @@ struct sctp_sha1_context { struct PK11Context *pk11_ctx; #elif defined(SCTP_USE_OPENSSL_SHA1) SHA_CTX sha_ctx; +#elif defined(SCTP_USE_MBEDTLS_SHA1) + mbedtls_sha1_context sha1_ctx; #else unsigned int A; unsigned int B; @@ -83,7 +78,7 @@ struct sctp_sha1_context { #endif }; -#if (defined(__APPLE__) && defined(KERNEL)) +#if (defined(__APPLE__) && !defined(__Userspace__) && defined(KERNEL)) #ifndef _KERNEL #define _KERNEL #endif diff --git a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_ss_functions.c b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_ss_functions.c index 59ef9cdfa..c23e142ea 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_ss_functions.c +++ b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_ss_functions.c @@ -28,9 +28,9 @@ * THE POSSIBILITY OF SUCH DAMAGE. */ -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) #include -__FBSDID("$FreeBSD: head/sys/netinet/sctp_ss_functions.c 345505 2019-03-25 16:40:54Z tuexen $"); +__FBSDID("$FreeBSD: head/sys/netinet/sctp_ss_functions.c 365071 2020-09-01 21:19:14Z mjg $"); #endif #include @@ -185,7 +185,6 @@ sctp_ss_default_remove(struct sctp_tcb *stcb, struct sctp_association *asoc, return; } - static struct sctp_stream_out * sctp_ss_default_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net, struct sctp_association *asoc) @@ -396,7 +395,6 @@ rrp_again: return; } - /* * Priority algorithm. * Always prefers streams based on their priority id. 
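The sctp_sha1.c hunk above adds an SCTP_USE_MBEDTLS_SHA1 branch that wires mbedTLS in behind the same sctp_sha1_init()/sctp_sha1_update()/sctp_sha1_final() interface used by the NSS and OpenSSL backends. A minimal usage sketch follows; the helper name is illustrative and the only other assumption is the standard 20-byte SHA-1 digest size.

/* Illustrative only, not part of the patch: hashing a buffer through the
 * wrappers shown above, independent of which backend was compiled in.
 * Assumes netinet/sctp_sha1.h is in scope. */
static void
example_sha1_digest(const unsigned char *data, unsigned int len,
                    unsigned char digest[20])	/* SHA-1 yields a 160-bit (20-byte) digest */
{
	struct sctp_sha1_context ctx;

	sctp_sha1_init(&ctx);
	sctp_sha1_update(&ctx, data, len);
	sctp_sha1_final(digest, &ctx);
}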
@@ -418,7 +416,6 @@ sctp_ss_prio_clear(struct sctp_tcb *stcb, struct sctp_association *asoc, TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.prio.next_spoke); strq->ss_params.prio.next_spoke.tqe_next = NULL; strq->ss_params.prio.next_spoke.tqe_prev = NULL; - } asoc->ss_data.last_out_stream = NULL; if (holds_lock == 0) { @@ -521,6 +518,9 @@ sctp_ss_prio_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net, { struct sctp_stream_out *strq, *strqt, *strqn; + if (asoc->ss_data.locked_on_sending) { + return (asoc->ss_data.locked_on_sending); + } strqt = asoc->ss_data.last_out_stream; prio_again: /* Find the next stream to use */ @@ -589,7 +589,7 @@ sctp_ss_prio_set_value(struct sctp_tcb *stcb, struct sctp_association *asoc, /* * Fair bandwidth algorithm. - * Maintains an equal troughput per stream. + * Maintains an equal throughput per stream. */ static void sctp_ss_fb_clear(struct sctp_tcb *stcb, struct sctp_association *asoc, @@ -697,6 +697,9 @@ sctp_ss_fb_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net, { struct sctp_stream_out *strq = NULL, *strqt; + if (asoc->ss_data.locked_on_sending) { + return (asoc->ss_data.locked_on_sending); + } if (asoc->ss_data.last_out_stream == NULL || TAILQ_FIRST(&asoc->ss_data.out.wheel) == TAILQ_LAST(&asoc->ss_data.out.wheel, sctpwheel_listhead)) { strqt = TAILQ_FIRST(&asoc->ss_data.out.wheel); @@ -764,8 +767,8 @@ sctp_ss_fb_scheduled(struct sctp_tcb *stcb, struct sctp_nets *net SCTP_UNUSED, */ static void sctp_ss_fcfs_add(struct sctp_tcb *stcb, struct sctp_association *asoc, - struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp, - int holds_lock); + struct sctp_stream_out *strq SCTP_UNUSED, + struct sctp_stream_queue_pending *sp, int holds_lock); static void sctp_ss_fcfs_init(struct sctp_tcb *stcb, struct sctp_association *asoc, @@ -897,7 +900,6 @@ sctp_ss_fcfs_remove(struct sctp_tcb *stcb, struct sctp_association *asoc, return; } - static struct sctp_stream_out * sctp_ss_fcfs_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net, struct sctp_association *asoc) @@ -905,6 +907,9 @@ sctp_ss_fcfs_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net, struct sctp_stream_out *strq; struct sctp_stream_queue_pending *sp; + if (asoc->ss_data.locked_on_sending) { + return (asoc->ss_data.locked_on_sending); + } sp = TAILQ_FIRST(&asoc->ss_data.out.list); default_again: if (sp != NULL) { @@ -940,7 +945,7 @@ default_again: const struct sctp_ss_functions sctp_ss_functions[] = { /* SCTP_SS_DEFAULT */ { -#if defined(__Windows__) || defined(__Userspace_os_Windows) +#if defined(_WIN32) sctp_ss_default_init, sctp_ss_default_clear, sctp_ss_default_init_stream, @@ -970,7 +975,7 @@ const struct sctp_ss_functions sctp_ss_functions[] = { }, /* SCTP_SS_ROUND_ROBIN */ { -#if defined(__Windows__) || defined(__Userspace_os_Windows) +#if defined(_WIN32) sctp_ss_default_init, sctp_ss_default_clear, sctp_ss_default_init_stream, @@ -1000,7 +1005,7 @@ const struct sctp_ss_functions sctp_ss_functions[] = { }, /* SCTP_SS_ROUND_ROBIN_PACKET */ { -#if defined(__Windows__) || defined(__Userspace_os_Windows) +#if defined(_WIN32) sctp_ss_default_init, sctp_ss_default_clear, sctp_ss_default_init_stream, @@ -1030,7 +1035,7 @@ const struct sctp_ss_functions sctp_ss_functions[] = { }, /* SCTP_SS_PRIORITY */ { -#if defined(__Windows__) || defined(__Userspace_os_Windows) +#if defined(_WIN32) sctp_ss_default_init, sctp_ss_prio_clear, sctp_ss_prio_init_stream, @@ -1060,7 +1065,7 @@ const struct sctp_ss_functions sctp_ss_functions[] = { }, 
/* SCTP_SS_FAIR_BANDWITH */ { -#if defined(__Windows__) || defined(__Userspace_os_Windows) +#if defined(_WIN32) sctp_ss_default_init, sctp_ss_fb_clear, sctp_ss_fb_init_stream, @@ -1090,7 +1095,7 @@ const struct sctp_ss_functions sctp_ss_functions[] = { }, /* SCTP_SS_FIRST_COME */ { -#if defined(__Windows__) || defined(__Userspace_os_Windows) +#if defined(_WIN32) sctp_ss_fcfs_init, sctp_ss_fcfs_clear, sctp_ss_fcfs_init_stream, diff --git a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_structs.h b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_structs.h index a7fd665f5..c3ce74992 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_structs.h +++ b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_structs.h @@ -32,9 +32,9 @@ * THE POSSIBILITY OF SUCH DAMAGE. */ -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) #include -__FBSDID("$FreeBSD: head/sys/netinet/sctp_structs.h 345467 2019-03-24 12:13:05Z tuexen $"); +__FBSDID("$FreeBSD$"); #endif #ifndef _NETINET_SCTP_STRUCTS_H_ @@ -55,7 +55,7 @@ struct sctp_timer { void *ep; void *tcb; void *net; -#if defined(__FreeBSD__) && __FreeBSD_version >= 800000 +#if defined(__FreeBSD__) && !defined(__Userspace__) void *vnet; #endif @@ -65,7 +65,6 @@ struct sctp_timer { uint32_t stopped_from; }; - struct sctp_foo_stuff { struct sctp_inpcb *inp; uint32_t lineno; @@ -73,7 +72,6 @@ struct sctp_foo_stuff { int updown; }; - /* * This is the information we track on each interface that we know about from * the distant end. @@ -113,13 +111,12 @@ typedef void (*asoc_func) (struct sctp_inpcb *, struct sctp_tcb *, void *ptr, typedef int (*inp_func) (struct sctp_inpcb *, void *ptr, uint32_t val); typedef void (*end_func) (void *ptr, uint32_t val); -#if defined(__FreeBSD__) && defined(SCTP_MCORE_INPUT) && defined(SMP) +#if defined(__FreeBSD__) && !defined(__Userspace__) +#if defined(SCTP_MCORE_INPUT) && defined(SMP) /* whats on the mcore control struct */ struct sctp_mcore_queue { TAILQ_ENTRY(sctp_mcore_queue) next; -#if defined(__FreeBSD__) && __FreeBSD_version >= 801000 struct vnet *vn; -#endif struct mbuf *m; int off; int v6; @@ -135,14 +132,12 @@ struct sctp_mcore_ctrl { int running; int cpuid; }; - - #endif - +#endif struct sctp_iterator { TAILQ_ENTRY(sctp_iterator) sctp_nxt_itr; -#if defined(__FreeBSD__) && __FreeBSD_version >= 801000 +#if defined(__FreeBSD__) && !defined(__Userspace__) struct vnet *vn; #endif struct sctp_timer tmr; @@ -166,7 +161,6 @@ struct sctp_iterator { #define SCTP_ITERATOR_DO_ALL_INP 0x00000001 #define SCTP_ITERATOR_DO_SINGLE_INP 0x00000002 - TAILQ_HEAD(sctpiterators, sctp_iterator); struct sctp_copy_all { @@ -184,10 +178,10 @@ struct sctp_asconf_iterator { }; struct iterator_control { -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) struct mtx ipi_iterator_wq_mtx; struct mtx it_mtx; -#elif defined(__APPLE__) +#elif defined(__APPLE__) && !defined(__Userspace__) lck_mtx_t *ipi_iterator_wq_mtx; lck_mtx_t *it_mtx; #elif defined(SCTP_PROCESS_LEVEL_LOCKS) @@ -200,7 +194,7 @@ struct iterator_control { pthread_mutex_t it_mtx; pthread_cond_t iterator_wakeup; #endif -#elif defined(__Windows__) +#elif defined(_WIN32) && !defined(__Userspace__) struct spinlock it_lock; struct spinlock ipi_iterator_wq_lock; KEVENT iterator_wakeup[2]; @@ -208,7 +202,7 @@ struct iterator_control { #else void *it_mtx; #endif -#if !defined(__Windows__) +#if !(defined(_WIN32) && !defined(__Userspace__)) #if !defined(__Userspace__) SCTP_PROCESS_STRUCT thread_proc; #else @@ -220,7 +214,7 @@ 
struct iterator_control { uint32_t iterator_running; uint32_t iterator_flags; }; -#if !defined(__FreeBSD__) +#if !(defined(__FreeBSD__) && !defined(__Userspace__)) #define SCTP_ITERATOR_MUST_EXIT 0x00000001 #define SCTP_ITERATOR_EXITED 0x00000002 #endif @@ -228,28 +222,18 @@ struct iterator_control { #define SCTP_ITERATOR_STOP_CUR_INP 0x00000008 struct sctp_net_route { - sctp_rtentry_t *ro_rt; -#if defined(__FreeBSD__) -#if __FreeBSD_version < 1100093 -#if __FreeBSD_version >= 800000 - void *ro_lle; -#endif -#if __FreeBSD_version >= 900000 - void *ro_ia; - int ro_flags; -#endif -#else -#if __FreeBSD_version >= 1100116 +#if defined(__FreeBSD__) && !defined(__Userspace__) + struct nhop_object *ro_nh; struct llentry *ro_lle; -#endif char *ro_prepend; uint16_t ro_plen; uint16_t ro_flags; uint16_t ro_mtu; uint16_t spare; +#else + sctp_rtentry_t *ro_rt; #endif -#endif -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) #if !defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION) && !defined(APPLE_ELCAPITAN) struct llentry *ro_lle; #endif @@ -306,7 +290,6 @@ struct rtcc_cc { uint8_t last_inst_ind; /* Last saved inst indication */ }; - struct sctp_nets { TAILQ_ENTRY(sctp_nets) sctp_next; /* next link */ @@ -334,7 +317,7 @@ struct sctp_nets { int lastsa; int lastsv; uint64_t rtt; /* last measured rtt value in us */ - unsigned int RTO; + uint32_t RTO; /* This is used for SHUTDOWN/SHUTDOWN-ACK/SEND or INIT timers */ struct sctp_timer rxt_timer; @@ -443,13 +426,12 @@ struct sctp_nets { uint8_t last_hs_used; /* index into the last HS table entry we used */ uint8_t lan_type; uint8_t rto_needed; -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) uint32_t flowid; uint8_t flowtype; #endif }; - struct sctp_data_chunkrec { uint32_t tsn; /* the TSN of this transmit */ uint32_t mid; /* the message identifier of this transmit */ @@ -488,7 +470,6 @@ struct chk_id { uint8_t can_take_data; }; - struct sctp_tmit_chunk { union { struct sctp_data_chunkrec data; @@ -597,6 +578,7 @@ struct sctp_stream_queue_pending { uint8_t sender_all_done; uint8_t put_last_out; uint8_t discard_rest; + uint8_t processing; }; /* @@ -616,6 +598,19 @@ struct sctp_stream_in { TAILQ_HEAD(sctpwheel_listhead, sctp_stream_out); TAILQ_HEAD(sctplist_listhead, sctp_stream_queue_pending); +/* + * This union holds all data necessary for + * different stream schedulers. + */ +struct scheduling_data { + struct sctp_stream_out *locked_on_sending; + /* circular looking for output selection */ + struct sctp_stream_out *last_out_stream; + union { + struct sctpwheel_listhead wheel; + struct sctplist_listhead list; + } out; +}; /* Round-robin schedulers */ struct ss_rr { @@ -639,20 +634,6 @@ struct ss_fb { int32_t rounds; }; -/* - * This union holds all data necessary for - * different stream schedulers. - */ -struct scheduling_data { - struct sctp_stream_out *locked_on_sending; - /* circular looking for output selection */ - struct sctp_stream_out *last_out_stream; - union { - struct sctpwheel_listhead wheel; - struct sctplist_listhead list; - } out; -}; - /* * This union holds all parameters per stream * necessary for different stream schedulers. 
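The hunks above repeatedly replace the library's private platform spellings (`__Windows__`, `__Userspace_os_Windows`, and bare `__FreeBSD__`/`__APPLE__` tests) with the compiler-provided `_WIN32` macro plus an explicit `__Userspace__` check. A minimal, self-contained sketch of the resulting guard pattern follows; the helper name `sctp_platform_name` is invented for illustration and is not part of usrsctplib.

/* Sketch only: shows the consolidated preprocessor guards used throughout
 * this patch. Not library code. */
#include <stdio.h>

static const char *
sctp_platform_name(void)
{
#if defined(_WIN32) && !defined(__Userspace__)
    /* Windows kernel-style build (formerly guarded by __Windows__) */
    return ("windows-kernel");
#elif defined(_WIN32) && defined(__Userspace__)
    /* Windows userland build (formerly __Userspace_os_Windows) */
    return ("windows-userspace");
#elif defined(__FreeBSD__) && !defined(__Userspace__)
    /* FreeBSD in-kernel build (formerly a bare __FreeBSD__ check) */
    return ("freebsd-kernel");
#else
    /* Everything else, including the userland stack on POSIX systems */
    return ("other");
#endif
}

int
main(void)
{
    printf("built for: %s\n", sctp_platform_name());
    return (0);
}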
@@ -670,8 +651,6 @@ union scheduling_parameters { #define SCTP_STREAM_RESET_PENDING 0x03 #define SCTP_STREAM_RESET_IN_FLIGHT 0x04 -#define SCTP_MAX_STREAMS_AT_ONCE_RESET 200 - /* This struct is used to track the traffic on outbound streams */ struct sctp_stream_out { struct sctp_streamhead outqueue; @@ -695,6 +674,8 @@ struct sctp_stream_out { uint8_t state; }; +#define SCTP_MAX_STREAMS_AT_ONCE_RESET 200 + /* used to keep track of the addresses yet to try to add/delete */ TAILQ_HEAD(sctp_asconf_addrhead, sctp_asconf_addr); struct sctp_asconf_addr { @@ -868,7 +849,6 @@ struct sctp_association { struct sctp_timer strreset_timer; /* stream reset */ struct sctp_timer shut_guard_timer; /* shutdown guard */ struct sctp_timer autoclose_timer; /* automatic close timer */ - struct sctp_timer delayed_event_timer; /* timer for delayed events */ struct sctp_timer delete_prim_timer; /* deleting primary dst */ /* list of restricted local addresses */ @@ -934,7 +914,6 @@ struct sctp_association { /* last place I got a control from */ struct sctp_nets *last_control_chunk_from; - /* * wait to the point the cum-ack passes req->send_reset_at_tsn for * any req on the list. @@ -998,7 +977,6 @@ struct sctp_association { /* Original seq number I used ??questionable to keep?? */ uint32_t init_seq_number; - /* The Advanced Peer Ack Point, as required by the PR-SCTP */ /* (A1 in Section 4.2) */ uint32_t advanced_peer_ack_point; @@ -1122,7 +1100,7 @@ struct sctp_association { uint32_t heart_beat_delay; /* autoclose */ - unsigned int sctp_autoclose_ticks; + uint32_t sctp_autoclose_ticks; /* how many preopen streams we have */ unsigned int pre_open_streams; @@ -1131,7 +1109,7 @@ struct sctp_association { unsigned int max_inbound_streams; /* the cookie life I award for any cookie, in seconds */ - unsigned int cookie_life; + uint32_t cookie_life; /* time to delay acks for */ unsigned int delayed_ack; unsigned int old_delayed_ack; @@ -1140,10 +1118,10 @@ struct sctp_association { unsigned int numduptsns; int dup_tsns[SCTP_MAX_DUP_TSNS]; - unsigned int initial_init_rto_max; /* initial RTO for INIT's */ - unsigned int initial_rto; /* initial send RTO */ - unsigned int minrto; /* per assoc RTO-MIN */ - unsigned int maxrto; /* per assoc RTO-MAX */ + uint32_t initial_init_rto_max; /* initial RTO for INIT's */ + uint32_t initial_rto; /* initial send RTO */ + uint32_t minrto; /* per assoc RTO-MIN */ + uint32_t maxrto; /* per assoc RTO-MAX */ /* authentication fields */ sctp_auth_chklist_t *local_auth_chunks; diff --git a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_sysctl.c b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_sysctl.c index ba911c3c5..bb49e1738 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_sysctl.c +++ b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_sysctl.c @@ -32,9 +32,9 @@ * THE POSSIBILITY OF SUCH DAMAGE. 
*/ -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) #include -__FBSDID("$FreeBSD: head/sys/netinet/sctp_sysctl.c 356357 2020-01-04 20:33:12Z tuexen $"); +__FBSDID("$FreeBSD: head/sys/netinet/sctp_sysctl.c 365071 2020-09-01 21:19:14Z mjg $"); #endif #include @@ -44,15 +44,15 @@ __FBSDID("$FreeBSD: head/sys/netinet/sctp_sysctl.c 356357 2020-01-04 20:33:12Z t #include #include #include -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) #include #include #endif -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) #include #endif -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) FEATURE(sctp, "Stream Control Transmission Protocol"); #endif @@ -74,7 +74,7 @@ sctp_init_sysctls() SCTP_BASE_SYSCTL(sctp_reconfig_enable) = SCTPCTL_RECONFIG_ENABLE_DEFAULT; SCTP_BASE_SYSCTL(sctp_nrsack_enable) = SCTPCTL_NRSACK_ENABLE_DEFAULT; SCTP_BASE_SYSCTL(sctp_pktdrop_enable) = SCTPCTL_PKTDROP_ENABLE_DEFAULT; -#if !(defined(__FreeBSD__) && __FreeBSD_version >= 800000) +#if !(defined(__FreeBSD__) && !defined(__Userspace__)) SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) = SCTPCTL_LOOPBACK_NOCSUM_DEFAULT; #endif SCTP_BASE_SYSCTL(sctp_peer_chunk_oh) = SCTPCTL_PEER_CHKOH_DEFAULT; @@ -152,7 +152,7 @@ sctp_init_sysctls() SCTP_BASE_SYSCTL(sctp_sendall_limit) = SCTPCTL_SENDALL_LIMIT_DEFAULT; SCTP_BASE_SYSCTL(sctp_diag_info_code) = SCTPCTL_DIAG_INFO_CODE_DEFAULT; #if defined(SCTP_LOCAL_TRACE_BUF) -#if defined(__Windows__) +#if defined(_WIN32) && !defined(__Userspace__) /* On Windows, the resource for global variables is limited. */ MALLOC(SCTP_BASE_SYSCTL(sctp_log), struct sctp_log *, sizeof(struct sctp_log), M_SYSCTL, M_ZERO); #else @@ -165,18 +165,18 @@ sctp_init_sysctls() #if defined(SCTP_DEBUG) SCTP_BASE_SYSCTL(sctp_debug_on) = SCTPCTL_DEBUG_DEFAULT; #endif -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_BASE_SYSCTL(sctp_ignore_vmware_interfaces) = SCTPCTL_IGNORE_VMWARE_INTERFACES_DEFAULT; SCTP_BASE_SYSCTL(sctp_main_timer) = SCTPCTL_MAIN_TIMER_DEFAULT; SCTP_BASE_SYSCTL(sctp_addr_watchdog_limit) = SCTPCTL_ADDR_WATCHDOG_LIMIT_DEFAULT; SCTP_BASE_SYSCTL(sctp_vtag_watchdog_limit) = SCTPCTL_VTAG_WATCHDOG_LIMIT_DEFAULT; #endif -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_BASE_SYSCTL(sctp_output_unlocked) = SCTPCTL_OUTPUT_UNLOCKED_DEFAULT; #endif } +#if defined(_WIN32) && !defined(__Userspace__) -#if defined(__Windows__) void sctp_finish_sysctls() { @@ -189,7 +189,7 @@ sctp_finish_sysctls() } #endif -#if defined(__APPLE__) || defined(__FreeBSD__) || defined(__Windows__) +#if !defined(__Userspace__) /* It returns an upper limit. 
No filtering is done here */ static unsigned int sctp_sysctl_number_of_addresses(struct sctp_inpcb *inp) @@ -329,7 +329,7 @@ sctp_sysctl_copy_out_local_addresses(struct sctp_inpcb *inp, struct sctp_tcb *st sin = &sctp_ifa->address.sin; if (sin->sin_addr.s_addr == 0) continue; -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) if (prison_check_ip4(inp->ip_inp.inp.inp_cred, &sin->sin_addr) != 0) { continue; @@ -350,7 +350,7 @@ sctp_sysctl_copy_out_local_addresses(struct sctp_inpcb *inp, struct sctp_tcb *st sin6 = &sctp_ifa->address.sin6; if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) continue; -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) if (prison_check_ip6(inp->ip_inp.inp.inp_cred, &sin6->sin6_addr) != 0) { continue; @@ -428,7 +428,7 @@ sctp_sysctl_copy_out_local_addresses(struct sctp_inpcb *inp, struct sctp_tcb *st /* * sysctl functions */ -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) static int sctp_sysctl_handle_assoclist SYSCTL_HANDLER_ARGS { @@ -458,7 +458,7 @@ sctp_sysctl_handle_assoclist(SYSCTL_HANDLER_ARGS) number_of_remote_addresses = 0; SCTP_INP_INFO_RLOCK(); -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) if (req->oldptr == USER_ADDR_NULL) { #else if (req->oldptr == NULL) { @@ -483,14 +483,14 @@ sctp_sysctl_handle_assoclist(SYSCTL_HANDLER_ARGS) (number_of_remote_addresses + number_of_associations) * sizeof(struct xsctp_raddr); /* request some more memory than needed */ -#if !defined(__Windows__) +#if !(defined(_WIN32) && !defined(__Userspace__)) req->oldidx = (n + n / 8); #else req->dataidx = (n + n / 8); #endif return (0); } -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) if (req->newptr != USER_ADDR_NULL) { #else if (req->newptr != NULL) { @@ -511,16 +511,12 @@ sctp_sysctl_handle_assoclist(SYSCTL_HANDLER_ARGS) xinpcb.last = 0; xinpcb.local_port = ntohs(inp->sctp_lport); xinpcb.flags = inp->sctp_flags; -#if defined(__FreeBSD__) && __FreeBSD_version < 1000048 - xinpcb.features = (uint32_t)inp->sctp_features; -#else xinpcb.features = inp->sctp_features; -#endif xinpcb.total_sends = inp->total_sends; xinpcb.total_recvs = inp->total_recvs; xinpcb.total_nospaces = inp->total_nospaces; xinpcb.fragmentation_point = inp->sctp_frag_point; -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) xinpcb.socket = (uintptr_t)inp->sctp_socket; #else xinpcb.socket = inp->sctp_socket; @@ -532,33 +528,16 @@ sctp_sysctl_handle_assoclist(SYSCTL_HANDLER_ARGS) xinpcb.qlen = 0; xinpcb.maxqlen = 0; } else { -#if defined(__FreeBSD__) && __FreeBSD_version >= 1200034 +#if defined(__FreeBSD__) && !defined(__Userspace__) xinpcb.qlen = so->sol_qlen; -#else - xinpcb.qlen = so->so_qlen; -#endif -#if defined(__FreeBSD__) && __FreeBSD_version > 1100096 -#if __FreeBSD_version >= 1200034 xinpcb.qlen_old = so->sol_qlen > USHRT_MAX ? USHRT_MAX : (uint16_t) so->sol_qlen; -#else - xinpcb.qlen_old = so->so_qlen > USHRT_MAX ? - USHRT_MAX : (uint16_t) so->so_qlen; -#endif -#endif -#if defined(__FreeBSD__) && __FreeBSD_version >= 1200034 xinpcb.maxqlen = so->sol_qlimit; -#else - xinpcb.maxqlen = so->so_qlimit; -#endif -#if defined(__FreeBSD__) && __FreeBSD_version > 1100096 -#if __FreeBSD_version >= 1200034 xinpcb.maxqlen_old = so->sol_qlimit > USHRT_MAX ? USHRT_MAX : (uint16_t) so->sol_qlimit; #else - xinpcb.maxqlen_old = so->so_qlimit > USHRT_MAX ? 
- USHRT_MAX : (uint16_t) so->so_qlimit; -#endif + xinpcb.qlen = so->so_qlen; + xinpcb.maxqlen = so->so_qlimit; #endif } SCTP_INP_INCR_REF(inp); @@ -587,16 +566,8 @@ sctp_sysctl_handle_assoclist(SYSCTL_HANDLER_ARGS) xstcb.primary_addr = stcb->asoc.primary_destination->ro._l_addr; xstcb.heartbeat_interval = stcb->asoc.heart_beat_delay; xstcb.state = (uint32_t)sctp_map_assoc_state(stcb->asoc.state); -#if defined(__FreeBSD__) -#if __FreeBSD_version >= 800000 - /* 7.0 does not support these */ xstcb.assoc_id = sctp_get_associd(stcb); xstcb.peers_rwnd = stcb->asoc.peers_rwnd; -#endif -#else - xstcb.assoc_id = sctp_get_associd(stcb); - xstcb.peers_rwnd = stcb->asoc.peers_rwnd; -#endif xstcb.in_streams = stcb->asoc.streamincnt; xstcb.out_streams = stcb->asoc.streamoutcnt; xstcb.max_nr_retrans = stcb->asoc.overall_error_count; @@ -648,8 +619,6 @@ sctp_sysctl_handle_assoclist(SYSCTL_HANDLER_ARGS) xraddr.cwnd = net->cwnd; xraddr.flight_size = net->flight_size; xraddr.mtu = net->mtu; -#if defined(__FreeBSD__) -#if __FreeBSD_version >= 800000 xraddr.rtt = net->rtt / 1000; xraddr.heartbeat_interval = net->heart_beat_delay; xraddr.ssthresh = net->ssthresh; @@ -661,20 +630,6 @@ sctp_sysctl_handle_assoclist(SYSCTL_HANDLER_ARGS) } else { xraddr.state = SCTP_INACTIVE; } -#endif -#else - xraddr.rtt = net->rtt / 1000; - xraddr.heartbeat_interval = net->heart_beat_delay; - xraddr.ssthresh = net->ssthresh; - xraddr.encaps_port = net->port; - if (net->dest_state & SCTP_ADDR_UNCONFIRMED) { - xraddr.state = SCTP_UNCONFIRMED; - } else if (net->dest_state & SCTP_ADDR_REACHABLE) { - xraddr.state = SCTP_ACTIVE; - } else { - xraddr.state = SCTP_INACTIVE; - } -#endif xraddr.start_time.tv_sec = (uint32_t)net->start_time.tv_sec; xraddr.start_time.tv_usec = (uint32_t)net->start_time.tv_usec; SCTP_INP_RUNLOCK(inp); @@ -721,7 +676,7 @@ skip: return (error); } -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) static int sctp_sysctl_handle_udp_tunneling SYSCTL_HANDLER_ARGS { @@ -738,22 +693,14 @@ sctp_sysctl_handle_udp_tunneling(SYSCTL_HANDLER_ARGS) old = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port); SCTP_INP_INFO_RUNLOCK(); new = old; -#if defined(__FreeBSD__) && __FreeBSD_version >= 800056 && __FreeBSD_version < 1000100 -#ifdef VIMAGE - error = vnet_sysctl_handle_int(oidp, &new, 0, req); -#else error = sysctl_handle_int(oidp, &new, 0, req); -#endif -#else - error = sysctl_handle_int(oidp, &new, 0, req); -#endif if ((error == 0) && -#if defined (__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) (req->newptr != USER_ADDR_NULL)) { #else (req->newptr != NULL)) { #endif -#if defined(__Windows__) +#if defined(_WIN32) && !defined(__Userspace__) SCTP_INP_INFO_WLOCK(); sctp_over_udp_restart(); SCTP_INP_INFO_WUNLOCK(); @@ -780,8 +727,8 @@ sctp_sysctl_handle_udp_tunneling(SYSCTL_HANDLER_ARGS) } return (error); } +#if defined(__APPLE__) && !defined(__Userspace__) -#if defined(__APPLE__) int sctp_is_vmware_interface(struct ifnet *); static int @@ -814,7 +761,7 @@ sctp_sysctl_handle_vmware_interfaces SYSCTL_HANDLER_ARGS } #endif -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) static int sctp_sysctl_handle_auth SYSCTL_HANDLER_ARGS { @@ -828,17 +775,9 @@ sctp_sysctl_handle_auth(SYSCTL_HANDLER_ARGS) uint32_t new; new = SCTP_BASE_SYSCTL(sctp_auth_enable); -#if defined(__FreeBSD__) && __FreeBSD_version >= 800056 && __FreeBSD_version < 1000100 -#ifdef VIMAGE - error = vnet_sysctl_handle_int(oidp, &new, 0, req); -#else error = sysctl_handle_int(oidp, &new, 0, req); -#endif -#else - 
error = sysctl_handle_int(oidp, &new, 0, req); -#endif if ((error == 0) && -#if defined (__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) (req->newptr != USER_ADDR_NULL)) { #else (req->newptr != NULL)) { @@ -859,7 +798,7 @@ sctp_sysctl_handle_auth(SYSCTL_HANDLER_ARGS) return (error); } -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) static int sctp_sysctl_handle_asconf SYSCTL_HANDLER_ARGS { @@ -873,17 +812,9 @@ sctp_sysctl_handle_asconf(SYSCTL_HANDLER_ARGS) uint32_t new; new = SCTP_BASE_SYSCTL(sctp_asconf_enable); -#if defined(__FreeBSD__) && __FreeBSD_version >= 800056 && __FreeBSD_version < 1000100 -#ifdef VIMAGE - error = vnet_sysctl_handle_int(oidp, &new, 0, req); -#else error = sysctl_handle_int(oidp, &new, 0, req); -#endif -#else - error = sysctl_handle_int(oidp, &new, 0, req); -#endif if ((error == 0) && -#if defined (__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) (req->newptr != USER_ADDR_NULL)) { #else (req->newptr != NULL)) { @@ -904,7 +835,7 @@ sctp_sysctl_handle_asconf(SYSCTL_HANDLER_ARGS) return (error); } -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) static int sctp_sysctl_handle_stats SYSCTL_HANDLER_ARGS { @@ -915,7 +846,7 @@ sctp_sysctl_handle_stats(SYSCTL_HANDLER_ARGS) { #endif int error; -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) #if defined(SMP) && defined(SCTP_USE_PERCPU_STAT) struct sctpstat *sarry; struct sctpstat sb; @@ -924,7 +855,7 @@ sctp_sysctl_handle_stats(SYSCTL_HANDLER_ARGS) struct sctpstat sb_temp; #endif -#if defined (__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) if ((req->newptr != USER_ADDR_NULL) && #else if ((req->newptr != NULL) && @@ -932,7 +863,7 @@ sctp_sysctl_handle_stats(SYSCTL_HANDLER_ARGS) (req->newlen != sizeof(struct sctpstat))) { return (EINVAL); } -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) memset(&sb_temp, 0, sizeof(struct sctpstat)); if (req->newptr != NULL) { @@ -1089,7 +1020,7 @@ sctp_sysctl_handle_stats(SYSCTL_HANDLER_ARGS) } #if defined(SCTP_LOCAL_TRACE_BUF) -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) static int sctp_sysctl_handle_trace_log SYSCTL_HANDLER_ARGS { @@ -1101,7 +1032,7 @@ sctp_sysctl_handle_trace_log(SYSCTL_HANDLER_ARGS) #endif int error; -#if defined(__Windows__) +#if defined(_WIN32) && !defined(__Userspace__) error = SYSCTL_OUT(req, SCTP_BASE_SYSCTL(sctp_log), sizeof(struct sctp_log)); #else error = SYSCTL_OUT(req, &SCTP_BASE_SYSCTL(sctp_log), sizeof(struct sctp_log)); @@ -1109,7 +1040,7 @@ sctp_sysctl_handle_trace_log(SYSCTL_HANDLER_ARGS) return (error); } -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) static int sctp_sysctl_handle_trace_log_clear SYSCTL_HANDLER_ARGS { @@ -1120,7 +1051,7 @@ sctp_sysctl_handle_trace_log_clear(SYSCTL_HANDLER_ARGS) { #endif int error = 0; -#if defined(__Windows__) +#if defined(_WIN32) && !defined(__Userspace__) int value = 0; if (req->new_data == NULL) { @@ -1138,33 +1069,8 @@ sctp_sysctl_handle_trace_log_clear(SYSCTL_HANDLER_ARGS) } #endif -#if defined(__APPLE__) || defined(__FreeBSD__) +#if (defined(__APPLE__) || defined(__FreeBSD__)) && !defined(__Userspace__) #if defined(__FreeBSD__) -#if __FreeBSD_version >= 800056 && __FreeBSD_version < 1000100 -#ifdef VIMAGE -#define SCTP_UINT_SYSCTL(name, var_name, prefix) \ - static int \ - sctp_sysctl_handle_##mib_name(SYSCTL_HANDLER_ARGS) \ - { \ - int error; \ - uint32_t new; \ - \ - new = SCTP_BASE_SYSCTL(var_name); \ - 
error = vnet_sysctl_handle_int(oidp, &new, 0, req); \ - if ((error == 0) && (req->newptr != NULL)) { \ - if ((new < prefix##_MIN) || \ - (new > prefix##_MAX)) { \ - error = EINVAL; \ - } else { \ - SCTP_BASE_SYSCTL(var_name) = new; \ - } \ - } \ - return (error); \ - } \ - SYSCTL_PROC(_net_inet_sctp, OID_AUTO, mib_name, \ - CTLTYPE_UINT|CTLFLAG_RW, NULL, 0, \ - sctp_sysctl_handle_##mib_name, "UI", prefix##_DESC); -#else #define SCTP_UINT_SYSCTL(mib_name, var_name, prefix) \ static int \ sctp_sysctl_handle_##mib_name(SYSCTL_HANDLER_ARGS) \ @@ -1187,31 +1093,6 @@ sctp_sysctl_handle_trace_log_clear(SYSCTL_HANDLER_ARGS) SYSCTL_PROC(_net_inet_sctp, OID_AUTO, mib_name, \ CTLFLAG_VNET|CTLTYPE_UINT|CTLFLAG_RW, NULL, 0, \ sctp_sysctl_handle_##mib_name, "UI", prefix##_DESC); -#endif -#else -#define SCTP_UINT_SYSCTL(mib_name, var_name, prefix) \ - static int \ - sctp_sysctl_handle_##mib_name(SYSCTL_HANDLER_ARGS) \ - { \ - int error; \ - uint32_t new; \ - \ - new = SCTP_BASE_SYSCTL(var_name); \ - error = sysctl_handle_int(oidp, &new, 0, req); \ - if ((error == 0) && (req->newptr != NULL)) { \ - if ((new < prefix##_MIN) || \ - (new > prefix##_MAX)) { \ - error = EINVAL; \ - } else { \ - SCTP_BASE_SYSCTL(var_name) = new; \ - } \ - } \ - return (error); \ - } \ - SYSCTL_PROC(_net_inet_sctp, OID_AUTO, mib_name, \ - CTLFLAG_VNET|CTLTYPE_UINT|CTLFLAG_RW, NULL, 0, \ - sctp_sysctl_handle_##mib_name, "UI", prefix##_DESC); -#endif #else #define SCTP_UINT_SYSCTL(mib_name, var_name, prefix) \ static int \ @@ -1258,7 +1139,7 @@ SYSCTL_PROC(_net_inet_sctp, OID_AUTO, asconf_enable, CTLFLAG_VNET|CTLTYPE_UINT|C SCTP_UINT_SYSCTL(reconfig_enable, sctp_reconfig_enable, SCTPCTL_RECONFIG_ENABLE) SCTP_UINT_SYSCTL(nrsack_enable, sctp_nrsack_enable, SCTPCTL_NRSACK_ENABLE) SCTP_UINT_SYSCTL(pktdrop_enable, sctp_pktdrop_enable, SCTPCTL_PKTDROP_ENABLE) -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_UINT_SYSCTL(loopback_nocsum, sctp_no_csum_on_loopback, SCTPCTL_LOOPBACK_NOCSUM) #endif SCTP_UINT_SYSCTL(peer_chkoh, sctp_peer_chunk_oh, SCTPCTL_PEER_CHKOH) @@ -1330,14 +1211,14 @@ SCTP_UINT_SYSCTL(diag_info_code, sctp_diag_info_code, SCTPCTL_DIAG_INFO_CODE) #ifdef SCTP_DEBUG SCTP_UINT_SYSCTL(debug, sctp_debug_on, SCTPCTL_DEBUG) #endif -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_UINT_SYSCTL(main_timer, sctp_main_timer, SCTPCTL_MAIN_TIMER) SYSCTL_PROC(_net_inet_sctp, OID_AUTO, ignore_vmware_interfaces, CTLTYPE_UINT|CTLFLAG_RW, NULL, 0, sctp_sysctl_handle_vmware_interfaces, "IU", SCTPCTL_IGNORE_VMWARE_INTERFACES_DESC); SCTP_UINT_SYSCTL(addr_watchdog_limit, sctp_addr_watchdog_limit, SCTPCTL_ADDR_WATCHDOG_LIMIT) SCTP_UINT_SYSCTL(vtag_watchdog_limit, sctp_vtag_watchdog_limit, SCTPCTL_VTAG_WATCHDOG_LIMIT) #endif -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_UINT_SYSCTL(output_unlocked, sctp_output_unlocked, SCTPCTL_OUTPUT_UNLOCKED) #endif SYSCTL_PROC(_net_inet_sctp, OID_AUTO, stats, CTLFLAG_VNET|CTLTYPE_STRUCT|CTLFLAG_RW, @@ -1345,7 +1226,7 @@ SYSCTL_PROC(_net_inet_sctp, OID_AUTO, stats, CTLFLAG_VNET|CTLTYPE_STRUCT|CTLFLAG SYSCTL_PROC(_net_inet_sctp, OID_AUTO, assoclist, CTLFLAG_VNET|CTLTYPE_OPAQUE|CTLFLAG_RD, NULL, 0, sctp_sysctl_handle_assoclist, "S,xassoc", "List of active SCTP associations"); -#elif defined(__Windows__) +#elif defined(_WIN32) && !defined(__Userspace__) #define RANGECHK(var, min, max) \ if ((var) < (min)) { (var) = (min); } \ diff --git 
a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_sysctl.h b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_sysctl.h index 3957e619f..006a11f31 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_sysctl.h +++ b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_sysctl.h @@ -32,9 +32,9 @@ * THE POSSIBILITY OF SUCH DAMAGE. */ -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) #include -__FBSDID("$FreeBSD: head/sys/netinet/sctp_sysctl.h 356357 2020-01-04 20:33:12Z tuexen $"); +__FBSDID("$FreeBSD: head/sys/netinet/sctp_sysctl.h 366750 2020-10-16 10:44:48Z tuexen $"); #endif #ifndef _NETINET_SCTP_SYSCTL_H_ @@ -56,7 +56,7 @@ struct sctp_sysctl { uint32_t sctp_nrsack_enable; uint32_t sctp_pktdrop_enable; uint32_t sctp_fr_max_burst_default; -#if !(defined(__FreeBSD__) && __FreeBSD_version >= 800000) +#if !(defined(__FreeBSD__) && !defined(__Userspace__)) uint32_t sctp_no_csum_on_loopback; #endif uint32_t sctp_peer_chunk_oh; @@ -113,7 +113,7 @@ struct sctp_sysctl { uint32_t sctp_use_dccc_ecn; uint32_t sctp_diag_info_code; #if defined(SCTP_LOCAL_TRACE_BUF) -#if defined(__Windows__) +#if defined(_WIN32) && !defined(__Userspace__) struct sctp_log *sctp_log; #else struct sctp_log sctp_log; @@ -129,13 +129,13 @@ struct sctp_sysctl { #if defined(SCTP_DEBUG) uint32_t sctp_debug_on; #endif -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) uint32_t sctp_ignore_vmware_interfaces; uint32_t sctp_main_timer; uint32_t sctp_addr_watchdog_limit; uint32_t sctp_vtag_watchdog_limit; #endif -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) uint32_t sctp_output_unlocked; #endif }; @@ -241,7 +241,6 @@ struct sctp_sysctl { #define SCTPCTL_FRMAXBURST_MAX 0xFFFFFFFF #define SCTPCTL_FRMAXBURST_DEFAULT SCTP_DEF_FRMAX_BURST - /* maxchunks: Default max chunks on queue per asoc */ #define SCTPCTL_MAXCHUNKS_DESC "Default max chunks on queue per asoc" #define SCTPCTL_MAXCHUNKS_MIN 0 @@ -344,10 +343,10 @@ struct sctp_sysctl { #define SCTPCTL_INIT_RTO_MAX_MAX 0xFFFFFFFF #define SCTPCTL_INIT_RTO_MAX_DEFAULT SCTP_RTO_UPPER_BOUND -/* valid_cookie_life: Default cookie lifetime in sec */ -#define SCTPCTL_VALID_COOKIE_LIFE_DESC "Default cookie lifetime in seconds" -#define SCTPCTL_VALID_COOKIE_LIFE_MIN 0 -#define SCTPCTL_VALID_COOKIE_LIFE_MAX 0xFFFFFFFF +/* valid_cookie_life: Default cookie lifetime in ms */ +#define SCTPCTL_VALID_COOKIE_LIFE_DESC "Default cookie lifetime in ms" +#define SCTPCTL_VALID_COOKIE_LIFE_MIN SCTP_MIN_COOKIE_LIFE +#define SCTPCTL_VALID_COOKIE_LIFE_MAX SCTP_MAX_COOKIE_LIFE #define SCTPCTL_VALID_COOKIE_LIFE_DEFAULT SCTP_DEFAULT_COOKIE_LIFE /* init_rtx_max: Default maximum number of retransmission for INIT chunks */ @@ -498,7 +497,7 @@ struct sctp_sysctl { #define SCTPCTL_UDP_TUNNELING_PORT_DESC "Set the SCTP/UDP tunneling port" #define SCTPCTL_UDP_TUNNELING_PORT_MIN 0 #define SCTPCTL_UDP_TUNNELING_PORT_MAX 65535 -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) #define SCTPCTL_UDP_TUNNELING_PORT_DEFAULT 0 #else #define SCTPCTL_UDP_TUNNELING_PORT_DEFAULT SCTP_OVER_UDP_TUNNELING_PORT @@ -585,7 +584,7 @@ struct sctp_sysctl { #define SCTPCTL_DEBUG_DEFAULT 0 #endif -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) #define SCTPCTL_MAIN_TIMER_DESC "Main timer interval in ms" #define SCTPCTL_MAIN_TIMER_MIN 1 #define SCTPCTL_MAIN_TIMER_MAX 0xFFFFFFFF @@ -595,16 +594,12 @@ struct sctp_sysctl { #define SCTPCTL_IGNORE_VMWARE_INTERFACES_MIN 
0 #define SCTPCTL_IGNORE_VMWARE_INTERFACES_MAX 1 #define SCTPCTL_IGNORE_VMWARE_INTERFACES_DEFAULT SCTPCTL_IGNORE_VMWARE_INTERFACES_MAX -#endif -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) #define SCTPCTL_OUTPUT_UNLOCKED_DESC "Unlock socket when sending packets down to IP" #define SCTPCTL_OUTPUT_UNLOCKED_MIN 0 #define SCTPCTL_OUTPUT_UNLOCKED_MAX 1 #define SCTPCTL_OUTPUT_UNLOCKED_DEFAULT SCTPCTL_OUTPUT_UNLOCKED_MIN -#endif -#if defined(__APPLE__) #define SCTPCTL_ADDR_WATCHDOG_LIMIT_DESC "Address watchdog limit" #define SCTPCTL_ADDR_WATCHDOG_LIMIT_MIN 0 #define SCTPCTL_ADDR_WATCHDOG_LIMIT_MAX 0xFFFFFFFF @@ -614,8 +609,8 @@ struct sctp_sysctl { #define SCTPCTL_VTAG_WATCHDOG_LIMIT_MIN 0 #define SCTPCTL_VTAG_WATCHDOG_LIMIT_MAX 0xFFFFFFFF #define SCTPCTL_VTAG_WATCHDOG_LIMIT_DEFAULT SCTPCTL_VTAG_WATCHDOG_LIMIT_MIN -#endif +#endif #if defined(_KERNEL) || defined(__Userspace__) #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Userspace__) #if defined(SYSCTL_DECL) @@ -624,7 +619,7 @@ SYSCTL_DECL(_net_inet_sctp); #endif void sctp_init_sysctls(void); -#if defined(__Windows__) +#if defined(_WIN32) && !defined(__Userspace__) void sctp_finish_sysctls(void); #endif diff --git a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_timer.c b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_timer.c index 98fafec11..c518d6fb2 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_timer.c +++ b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_timer.c @@ -32,16 +32,16 @@ * THE POSSIBILITY OF SUCH DAMAGE. */ -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) #include -__FBSDID("$FreeBSD: head/sys/netinet/sctp_timer.c 338134 2018-08-21 13:25:32Z tuexen $"); +__FBSDID("$FreeBSD$"); #endif #define _IP_VHL #include #include #ifdef INET6 -#if defined(__Userspace_os_FreeBSD) +#if defined(__FreeBSD__) && defined(__Userspace__) #include #endif #endif @@ -57,15 +57,11 @@ __FBSDID("$FreeBSD: head/sys/netinet/sctp_timer.c 338134 2018-08-21 13:25:32Z tu #include #include #if defined(INET) || defined(INET6) -#if !defined(__Userspace_os_Windows) +#if !(defined(_WIN32) && defined(__Userspace__)) #include #endif #endif -#if defined(__APPLE__) -#define APPLE_FILE_NO 6 -#endif - void sctp_audit_retranmission_queue(struct sctp_association *asoc) { @@ -168,15 +164,15 @@ sctp_threshold_management(struct sctp_inpcb *inp, struct sctp_tcb *stcb, op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), "Association error counter exceeded"); inp->last_abort_code = SCTP_FROM_SCTP_TIMER + SCTP_LOC_2; - sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); + sctp_abort_an_association(inp, stcb, op_err, true, SCTP_SO_NOT_LOCKED); return (1); } return (0); } /* - * sctp_find_alternate_net() returns a non-NULL pointer as long - * the argument net is non-NULL. + * sctp_find_alternate_net() returns a non-NULL pointer as long as there + * exists nets, which are not being deleted. */ struct sctp_nets * sctp_find_alternate_net(struct sctp_tcb *stcb, @@ -185,13 +181,13 @@ sctp_find_alternate_net(struct sctp_tcb *stcb, { /* Find and return an alternate network if possible */ struct sctp_nets *alt, *mnet, *min_errors_net = NULL , *max_cwnd_net = NULL; - int once; + bool looped; /* JRS 5/14/07 - Initialize min_errors to an impossible value. */ int min_errors = -1; uint32_t max_cwnd = 0; if (stcb->asoc.numnets == 1) { - /* No others but net */ + /* No selection can be made. 
*/ return (TAILQ_FIRST(&stcb->asoc.nets)); } /* @@ -327,28 +323,29 @@ sctp_find_alternate_net(struct sctp_tcb *stcb, return (max_cwnd_net); } } - mnet = net; - once = 0; - - if (mnet == NULL) { - mnet = TAILQ_FIRST(&stcb->asoc.nets); - if (mnet == NULL) { - return (NULL); - } + /* Look for an alternate net, which is active. */ + if ((net != NULL) && ((net->dest_state & SCTP_ADDR_BEING_DELETED) == 0)) { + alt = TAILQ_NEXT(net, sctp_next); + } else { + alt = TAILQ_FIRST(&stcb->asoc.nets); } + looped = false; for (;;) { - alt = TAILQ_NEXT(mnet, sctp_next); if (alt == NULL) { - once++; - if (once > 1) { + if (!looped) { + alt = TAILQ_FIRST(&stcb->asoc.nets); + looped = true; + } + /* Definitely out of candidates. */ + if (alt == NULL) { break; } - alt = TAILQ_FIRST(&stcb->asoc.nets); - if (alt == NULL) { - return (NULL); - } } +#if defined(__FreeBSD__) && !defined(__Userspace__) + if (alt->ro.ro_nh == NULL) { +#else if (alt->ro.ro_rt == NULL) { +#endif if (alt->ro._s_addr) { sctp_free_ifa(alt->ro._s_addr); alt->ro._s_addr = NULL; @@ -356,44 +353,61 @@ sctp_find_alternate_net(struct sctp_tcb *stcb, alt->src_addr_selected = 0; } if (((alt->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE) && +#if defined(__FreeBSD__) && !defined(__Userspace__) + (alt->ro.ro_nh != NULL) && +#else (alt->ro.ro_rt != NULL) && - (!(alt->dest_state & SCTP_ADDR_UNCONFIRMED))) { - /* Found a reachable address */ +#endif + (!(alt->dest_state & SCTP_ADDR_UNCONFIRMED)) && + (alt != net)) { + /* Found an alternate net, which is reachable. */ break; } - mnet = alt; + alt = TAILQ_NEXT(alt, sctp_next); } if (alt == NULL) { - /* Case where NO insv network exists (dormant state) */ - /* we rotate destinations */ - once = 0; - mnet = net; + /* + * In case no active alternate net has been found, look for + * an alternate net, which is confirmed. + */ + if ((net != NULL) && ((net->dest_state & SCTP_ADDR_BEING_DELETED) == 0)) { + alt = TAILQ_NEXT(net, sctp_next); + } else { + alt = TAILQ_FIRST(&stcb->asoc.nets); + } + looped = false; for (;;) { - if (mnet == NULL) { - return (TAILQ_FIRST(&stcb->asoc.nets)); - } - alt = TAILQ_NEXT(mnet, sctp_next); if (alt == NULL) { - once++; - if (once > 1) { - break; + if (!looped) { + alt = TAILQ_FIRST(&stcb->asoc.nets); + looped = true; } - alt = TAILQ_FIRST(&stcb->asoc.nets); + /* Definitely out of candidates. */ if (alt == NULL) { break; } } if ((!(alt->dest_state & SCTP_ADDR_UNCONFIRMED)) && (alt != net)) { - /* Found an alternate address */ + /* Found an alternate net, which is confirmed. */ break; } - mnet = alt; + alt = TAILQ_NEXT(alt, sctp_next); } } if (alt == NULL) { - return (net); + /* + * In case no confirmed alternate net has been found, just + * return net, if it is not being deleted. In the other case + * just return the first net. 
+ */ + if ((net != NULL) && ((net->dest_state & SCTP_ADDR_BEING_DELETED) == 0)) { + alt = net; + } + if (alt == NULL) { + alt = TAILQ_FIRST(&stcb->asoc.nets); + } } return (alt); } @@ -495,7 +509,6 @@ sctp_mark_all_for_resend(struct sctp_tcb *stcb, uint32_t tsnlast, tsnfirst; int recovery_cnt = 0; - /* none in flight now */ audit_tf = 0; fir = 0; @@ -517,7 +530,7 @@ sctp_mark_all_for_resend(struct sctp_tcb *stcb, } tv.tv_sec = cur_rto / 1000000; tv.tv_usec = cur_rto % 1000000; -#ifndef __FreeBSD__ +#if !(defined(__FreeBSD__) && !defined(__Userspace__)) timersub(&now, &tv, &min_wait); #else min_wait = now; @@ -533,8 +546,8 @@ sctp_mark_all_for_resend(struct sctp_tcb *stcb, min_wait.tv_sec = min_wait.tv_usec = 0; } if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { - sctp_log_fr(cur_rto, now.tv_sec, now.tv_usec, SCTP_FR_T3_MARK_TIME); - sctp_log_fr(0, min_wait.tv_sec, min_wait.tv_usec, SCTP_FR_T3_MARK_TIME); + sctp_log_fr(cur_rto, (uint32_t)now.tv_sec, now.tv_usec, SCTP_FR_T3_MARK_TIME); + sctp_log_fr(0, (uint32_t)min_wait.tv_sec, min_wait.tv_usec, SCTP_FR_T3_MARK_TIME); } /* * Our rwnd will be incorrect here since we are not adding back the @@ -581,7 +594,7 @@ sctp_mark_all_for_resend(struct sctp_tcb *stcb, /* validate its been outstanding long enough */ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { sctp_log_fr(chk->rec.data.tsn, - chk->sent_rcv_time.tv_sec, + (uint32_t)chk->sent_rcv_time.tv_sec, chk->sent_rcv_time.tv_usec, SCTP_FR_T3_MARK_TIME); } @@ -593,7 +606,7 @@ sctp_mark_all_for_resend(struct sctp_tcb *stcb, */ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { sctp_log_fr(0, - chk->sent_rcv_time.tv_sec, + (uint32_t)chk->sent_rcv_time.tv_sec, chk->sent_rcv_time.tv_usec, SCTP_FR_T3_STOPPED); } @@ -614,7 +627,7 @@ sctp_mark_all_for_resend(struct sctp_tcb *stcb, } if (stcb->asoc.prsctp_supported && PR_SCTP_TTL_ENABLED(chk->flags)) { /* Is it expired? */ -#ifndef __FreeBSD__ +#if !(defined(__FreeBSD__) && !defined(__Userspace__)) if (timercmp(&now, &chk->rec.data.timetodrop, >)) { #else if (timevalcmp(&now, &chk->rec.data.timetodrop, >)) { @@ -806,7 +819,6 @@ sctp_mark_all_for_resend(struct sctp_tcb *stcb, return (0); } - int sctp_t3rxt_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, @@ -848,11 +860,11 @@ sctp_t3rxt_timer(struct sctp_inpcb *inp, if (net != stcb->asoc.primary_destination) { /* send a immediate HB if our RTO is stale */ struct timeval now; - unsigned int ms_goneby; + uint32_t ms_goneby; (void)SCTP_GETTIME_TIMEVAL(&now); if (net->last_sent_time.tv_sec) { - ms_goneby = (now.tv_sec - net->last_sent_time.tv_sec) * 1000; + ms_goneby = (uint32_t)(now.tv_sec - net->last_sent_time.tv_sec) * 1000; } else { ms_goneby = 0; } @@ -942,10 +954,14 @@ sctp_t3rxt_timer(struct sctp_inpcb *inp, net->src_addr_selected = 0; /* Force a route allocation too */ +#if defined(__FreeBSD__) && !defined(__Userspace__) + RO_NHFREE(&net->ro); +#else if (net->ro.ro_rt) { RTFREE(net->ro.ro_rt); net->ro.ro_rt = NULL; } +#endif /* Was it our primary? */ if ((stcb->asoc.primary_destination == net) && (alt != net)) { @@ -982,7 +998,12 @@ sctp_t3rxt_timer(struct sctp_inpcb *inp, /* C3. 
See if we need to send a Fwd-TSN */ if (SCTP_TSN_GT(stcb->asoc.advanced_peer_ack_point, stcb->asoc.last_acked_seq)) { send_forward_tsn(stcb, &stcb->asoc); - if (lchk) { + for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) { + if (lchk->whoTo != NULL) { + break; + } + } + if (lchk != NULL) { /* Assure a timer is up */ sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, lchk->whoTo); } @@ -1064,7 +1085,7 @@ sctp_cookie_timer(struct sctp_inpcb *inp, op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), "Cookie timer expired, but no cookie"); inp->last_abort_code = SCTP_FROM_SCTP_TIMER + SCTP_LOC_3; - sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); + sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED); } else { #ifdef INVARIANTS panic("Cookie timer expires in wrong state?"); @@ -1108,10 +1129,9 @@ sctp_cookie_timer(struct sctp_inpcb *inp, } int -sctp_strreset_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, - struct sctp_nets *net) +sctp_strreset_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb) { - struct sctp_nets *alt; + struct sctp_nets *alt, *net; struct sctp_tmit_chunk *strrst = NULL, *chk = NULL; if (stcb->asoc.stream_reset_outstanding == 0) { @@ -1122,9 +1142,9 @@ sctp_strreset_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, if (strrst == NULL) { return (0); } + net = strrst->whoTo; /* do threshold management */ - if (sctp_threshold_management(inp, stcb, strrst->whoTo, - stcb->asoc.max_send_times)) { + if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) { /* Assoc is over */ return (1); } @@ -1132,9 +1152,8 @@ sctp_strreset_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, * Cleared threshold management, now lets backoff the address * and select an alternate */ - sctp_backoff_on_timeout(stcb, strrst->whoTo, 1, 0, 0); - alt = sctp_find_alternate_net(stcb, strrst->whoTo, 0); - sctp_free_remote_addr(strrst->whoTo); + sctp_backoff_on_timeout(stcb, net, 1, 0, 0); + alt = sctp_find_alternate_net(stcb, net, 0); strrst->whoTo = alt; atomic_add_int(&alt->ref_count, 1); @@ -1159,6 +1178,8 @@ sctp_strreset_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, */ sctp_move_chunks_from_net(stcb, net); } + sctp_free_remote_addr(net); + /* mark the retran info */ if (strrst->sent != SCTP_DATAGRAM_RESEND) sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); @@ -1166,7 +1187,7 @@ sctp_strreset_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, strrst->flags |= CHUNK_FLAGS_FRAGMENT_OK; /* restart the timer */ - sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, inp, stcb, strrst->whoTo); + sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, inp, stcb, alt); return (0); } @@ -1191,8 +1212,9 @@ sctp_asconf_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, if (asconf == NULL) { return (0); } + net = asconf->whoTo; /* do threshold management */ - if (sctp_threshold_management(inp, stcb, asconf->whoTo, + if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) { /* Assoc is over */ return (1); @@ -1205,17 +1227,16 @@ sctp_asconf_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, * Mark this peer as ASCONF incapable and cleanup. 
*/ SCTPDBG(SCTP_DEBUG_TIMER1, "asconf_timer: Peer has not responded to our repeated ASCONFs\n"); - sctp_asconf_cleanup(stcb, net); + sctp_asconf_cleanup(stcb); return (0); } /* * cleared threshold management, so now backoff the net and * select an alternate */ - sctp_backoff_on_timeout(stcb, asconf->whoTo, 1, 0, 0); - alt = sctp_find_alternate_net(stcb, asconf->whoTo, 0); + sctp_backoff_on_timeout(stcb, net, 1, 0, 0); + alt = sctp_find_alternate_net(stcb, net, 0); if (asconf->whoTo != alt) { - sctp_free_remote_addr(asconf->whoTo); asconf->whoTo = alt; atomic_add_int(&alt->ref_count, 1); } @@ -1252,6 +1273,8 @@ sctp_asconf_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, */ sctp_move_chunks_from_net(stcb, net); } + sctp_free_remote_addr(net); + /* mark the retran info */ if (asconf->sent != SCTP_DATAGRAM_RESEND) sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); @@ -1266,8 +1289,7 @@ sctp_asconf_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, /* Mobility adaptation */ void -sctp_delete_prim_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, - struct sctp_nets *net SCTP_UNUSED) +sctp_delete_prim_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb) { if (stcb->asoc.deleted_primary == NULL) { SCTPDBG(SCTP_DEBUG_ASCONF1, "delete_prim_timer: deleted_primary is not stored...\n"); @@ -1441,7 +1463,7 @@ sctp_heartbeat_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, if ((net->last_sent_time.tv_sec > 0) || (net->last_sent_time.tv_usec > 0)) { -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) struct timeval diff; SCTP_GETTIME_TIMEVAL(&diff); @@ -1487,7 +1509,7 @@ sctp_pathmtu_timer(struct sctp_inpcb *inp, if (net->ro._l_addr.sa.sa_family == AF_INET6) { struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&net->ro._l_addr; /* KAME hack: embed scopeid */ -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD) (void)in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL); #else @@ -1520,7 +1542,11 @@ sctp_pathmtu_timer(struct sctp_inpcb *inp, net->src_addr_selected = 1; } if (net->ro._s_addr) { +#if defined(__FreeBSD__) && !defined(__Userspace__) + mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._s_addr.sa, net->ro.ro_nh); +#else mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._s_addr.sa, net->ro.ro_rt); +#endif #if defined(INET) || defined(INET6) if (net->port) { mtu -= sizeof(struct udphdr); @@ -1538,16 +1564,14 @@ sctp_pathmtu_timer(struct sctp_inpcb *inp, } void -sctp_autoclose_timer(struct sctp_inpcb *inp, - struct sctp_tcb *stcb, - struct sctp_nets *net) +sctp_autoclose_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb) { struct timeval tn, *tim_touse; struct sctp_association *asoc; - int ticks_gone_by; + uint32_t ticks_gone_by; (void)SCTP_GETTIME_TIMEVAL(&tn); - if (stcb->asoc.sctp_autoclose_ticks && + if (stcb->asoc.sctp_autoclose_ticks > 0 && sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) { /* Auto close is on */ asoc = &stcb->asoc; @@ -1559,9 +1583,8 @@ sctp_autoclose_timer(struct sctp_inpcb *inp, tim_touse = &asoc->time_last_sent; } /* Now has long enough transpired to autoclose? 
*/ - ticks_gone_by = SEC_TO_TICKS(tn.tv_sec - tim_touse->tv_sec); - if ((ticks_gone_by > 0) && - (ticks_gone_by >= (int)asoc->sctp_autoclose_ticks)) { + ticks_gone_by = sctp_secs_to_ticks((uint32_t)(tn.tv_sec - tim_touse->tv_sec)); + if (ticks_gone_by >= asoc->sctp_autoclose_ticks) { /* * autoclose time has hit, call the output routine, * which should do nothing just to be SURE we don't @@ -1579,7 +1602,7 @@ sctp_autoclose_timer(struct sctp_inpcb *inp, */ if (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) { /* only send SHUTDOWN 1st time thru */ - struct sctp_nets *netp; + struct sctp_nets *net; if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { @@ -1588,17 +1611,15 @@ sctp_autoclose_timer(struct sctp_inpcb *inp, SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT); sctp_stop_timers_for_shutdown(stcb); if (stcb->asoc.alternate) { - netp = stcb->asoc.alternate; + net = stcb->asoc.alternate; } else { - netp = stcb->asoc.primary_destination; + net = stcb->asoc.primary_destination; } - sctp_send_shutdown(stcb, netp); + sctp_send_shutdown(stcb, net); sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, - stcb->sctp_ep, stcb, - netp); + stcb->sctp_ep, stcb, net); sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, - stcb->sctp_ep, stcb, - netp); + stcb->sctp_ep, stcb, NULL); } } } else { @@ -1606,13 +1627,12 @@ sctp_autoclose_timer(struct sctp_inpcb *inp, * No auto close at this time, reset t-o to check * later */ - int tmp; + uint32_t tmp; /* fool the timer startup to use the time left */ tmp = asoc->sctp_autoclose_ticks; asoc->sctp_autoclose_ticks -= ticks_gone_by; - sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, - net); + sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL); /* restore the real tick value */ asoc->sctp_autoclose_ticks = tmp; } diff --git a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_timer.h b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_timer.h index 822d5b400..8cfbbca5d 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_timer.h +++ b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_timer.h @@ -32,9 +32,9 @@ * THE POSSIBILITY OF SUCH DAMAGE. 
*/ -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) #include -__FBSDID("$FreeBSD: head/sys/netinet/sctp_timer.h 295709 2016-02-17 18:04:22Z tuexen $"); +__FBSDID("$FreeBSD: head/sys/netinet/sctp_timer.h 365071 2020-09-01 21:19:14Z mjg $"); #endif #ifndef _NETINET_SCTP_TIMER_H_ @@ -46,18 +46,20 @@ __FBSDID("$FreeBSD: head/sys/netinet/sctp_timer.h 295709 2016-02-17 18:04:22Z tu #define SCTP_RTT_VAR_SHIFT 2 struct sctp_nets * -sctp_find_alternate_net(struct sctp_tcb *, - struct sctp_nets *, int mode); +sctp_find_alternate_net(struct sctp_tcb *, struct sctp_nets *, int); int sctp_t3rxt_timer(struct sctp_inpcb *, struct sctp_tcb *, struct sctp_nets *); + int sctp_t1init_timer(struct sctp_inpcb *, struct sctp_tcb *, struct sctp_nets *); + int sctp_shutdown_timer(struct sctp_inpcb *, struct sctp_tcb *, struct sctp_nets *); + int sctp_heartbeat_timer(struct sctp_inpcb *, struct sctp_tcb *, struct sctp_nets *); @@ -74,32 +76,28 @@ int sctp_shutdownack_timer(struct sctp_inpcb *, struct sctp_tcb *, struct sctp_nets *); int -sctp_strreset_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, - struct sctp_nets *net); +sctp_strreset_timer(struct sctp_inpcb *, struct sctp_tcb *); int sctp_asconf_timer(struct sctp_inpcb *, struct sctp_tcb *, struct sctp_nets *); void -sctp_delete_prim_timer(struct sctp_inpcb *, struct sctp_tcb *, - struct sctp_nets *); +sctp_delete_prim_timer(struct sctp_inpcb *, struct sctp_tcb *); void -sctp_autoclose_timer(struct sctp_inpcb *, struct sctp_tcb *, - struct sctp_nets *net); +sctp_autoclose_timer(struct sctp_inpcb *, struct sctp_tcb *); void sctp_audit_retranmission_queue(struct sctp_association *); void sctp_iterator_timer(struct sctp_iterator *it); -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD) || defined(APPLE_LION) || defined(APPLE_MOUNTAINLION) void sctp_slowtimo(void); #else void sctp_gc(struct inpcbinfo *); #endif #endif - #endif #endif diff --git a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_uio.h b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_uio.h index ecc74cc49..18956f1a1 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_uio.h +++ b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_uio.h @@ -32,29 +32,28 @@ * THE POSSIBILITY OF SUCH DAMAGE. */ -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) #include -__FBSDID("$FreeBSD: head/sys/netinet/sctp_uio.h 336511 2018-07-19 20:16:33Z tuexen $"); +__FBSDID("$FreeBSD: head/sys/netinet/sctp_uio.h 365071 2020-09-01 21:19:14Z mjg $"); #endif #ifndef _NETINET_SCTP_UIO_H_ #define _NETINET_SCTP_UIO_H_ -#if (defined(__APPLE__) && defined(KERNEL)) +#if (defined(__APPLE__) && !defined(__Userspace__) && defined(KERNEL)) #ifndef _KERNEL #define _KERNEL #endif #endif - -#if !(defined(__Windows__)) && !defined(__Userspace_os_Windows) -#if ! defined(_KERNEL) +#if !defined(_WIN32) +#if !defined(_KERNEL) #include #endif #include #include #include #endif -#if defined(__Windows__) +#if defined(_WIN32) && !defined(__Userspace__) #pragma warning(push) #pragma warning(disable: 4200) #if defined(_KERNEL) @@ -110,14 +109,8 @@ struct sctp_event_subscribe { * ancillary data structures */ struct sctp_initmsg { -#if defined(__FreeBSD__) && __FreeBSD_version < 800000 - /* This is a bug. 
Not fixed for ABI compatibility */ - uint32_t sinit_num_ostreams; - uint32_t sinit_max_instreams; -#else uint16_t sinit_num_ostreams; uint16_t sinit_max_instreams; -#endif uint16_t sinit_max_attempts; uint16_t sinit_max_init_timeo; }; @@ -135,7 +128,6 @@ struct sctp_initmsg { * all sendrcvinfo's need a verfid for SENDING only. */ - #define SCTP_ALIGN_RESV_PAD 92 #define SCTP_ALIGN_RESV_PAD_SHORT 76 @@ -143,9 +135,6 @@ struct sctp_sndrcvinfo { uint16_t sinfo_stream; uint16_t sinfo_ssn; uint16_t sinfo_flags; -#if defined(__FreeBSD__) && __FreeBSD_version < 800000 - uint16_t sinfo_pr_policy; -#endif uint32_t sinfo_ppid; uint32_t sinfo_context; uint32_t sinfo_timetolive; @@ -161,9 +150,6 @@ struct sctp_extrcvinfo { uint16_t sinfo_stream; uint16_t sinfo_ssn; uint16_t sinfo_flags; -#if defined(__FreeBSD__) && __FreeBSD_version < 800000 - uint16_t sinfo_pr_policy; -#endif uint32_t sinfo_ppid; uint32_t sinfo_context; uint32_t sinfo_timetolive; /* should have been sinfo_pr_value */ @@ -453,7 +439,6 @@ struct sctp_setadaption { uint32_t ssb_adaption_ind; }; - /* * Partial Delivery API event */ @@ -470,7 +455,6 @@ struct sctp_pdapi_event { /* indication values */ #define SCTP_PARTIAL_DELIVERY_ABORTED 0x0001 - /* * authentication key event */ @@ -490,7 +474,6 @@ struct sctp_authkey_event { #define SCTP_AUTH_NO_AUTH 0x0002 #define SCTP_AUTH_FREE_KEY 0x0003 - struct sctp_sender_dry_event { uint16_t sender_dry_type; uint16_t sender_dry_flags; @@ -498,7 +481,6 @@ struct sctp_sender_dry_event { sctp_assoc_t sender_dry_assoc_id; }; - /* * Stream reset event - subscribe to SCTP_STREAM_RESET_EVENT */ @@ -546,7 +528,6 @@ struct sctp_stream_change_event { #define SCTP_STREAM_CHANGE_DENIED 0x0004 #define SCTP_STREAM_CHANGE_FAILED 0x0008 - /* SCTP notification event */ struct sctp_tlv { uint16_t sn_type; @@ -662,10 +643,18 @@ struct sctp_setpeerprim { uint8_t sspp_padding[4]; }; +union sctp_sockstore { + struct sockaddr_in sin; + struct sockaddr_in6 sin6; +#if defined(__Userspace__) + struct sockaddr_conn sconn; +#endif + struct sockaddr sa; +}; + struct sctp_getaddresses { sctp_assoc_t sget_assoc_id; - /* addr is filled in for N * sockaddr_storage */ - struct sockaddr addr[1]; + union sctp_sockstore addr[]; }; struct sctp_status { @@ -1121,13 +1110,18 @@ struct sctpstat { #define SCTP_STAT_INCR(_x) SCTP_STAT_INCR_BY(_x,1) #define SCTP_STAT_DECR(_x) SCTP_STAT_DECR_BY(_x,1) -#if defined(__FreeBSD__) && defined(SMP) && defined(SCTP_USE_PERCPU_STAT) +#if defined(__FreeBSD__) && !defined(__Userspace__) +#if defined(SMP) && defined(SCTP_USE_PERCPU_STAT) #define SCTP_STAT_INCR_BY(_x,_d) (SCTP_BASE_STATS[PCPU_GET(cpuid)]._x += _d) #define SCTP_STAT_DECR_BY(_x,_d) (SCTP_BASE_STATS[PCPU_GET(cpuid)]._x -= _d) #else #define SCTP_STAT_INCR_BY(_x,_d) atomic_add_int(&SCTP_BASE_STAT(_x), _d) #define SCTP_STAT_DECR_BY(_x,_d) atomic_subtract_int(&SCTP_BASE_STAT(_x), _d) #endif +#else +#define SCTP_STAT_INCR_BY(_x,_d) atomic_add_int(&SCTP_BASE_STAT(_x), _d) +#define SCTP_STAT_DECR_BY(_x,_d) atomic_subtract_int(&SCTP_BASE_STAT(_x), _d) +#endif /* The following macros are for handling MIB values, */ #define SCTP_STAT_INCR_COUNTER32(_x) SCTP_STAT_INCR(_x) #define SCTP_STAT_INCR_COUNTER64(_x) SCTP_STAT_INCR(_x) @@ -1136,24 +1130,14 @@ struct sctpstat { #define SCTP_STAT_DECR_COUNTER64(_x) SCTP_STAT_DECR(_x) #define SCTP_STAT_DECR_GAUGE32(_x) SCTP_STAT_DECR(_x) -union sctp_sockstore { - struct sockaddr_in sin; - struct sockaddr_in6 sin6; -#if defined(__Userspace__) - struct sockaddr_conn sconn; -#endif - struct sockaddr sa; -}; - - 
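The change to struct sctp_getaddresses above swaps the fixed `struct sockaddr addr[1]` tail for a C99 flexible array member of `union sctp_sockstore`, so a caller sizes the allocation for however many addresses it expects. A minimal allocation sketch under that reading follows; the types are redeclared only to keep the example self-contained, the `sctp_assoc_t` typedef is assumed to match the library's, and the plain `malloc` wrapper is illustrative rather than taken from usrsctplib.

/* Sketch: allocating a sctp_getaddresses-style request whose tail is a
 * flexible array member, as declared in the hunk above. */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>

typedef uint32_t sctp_assoc_t;          /* assumption: matches the library typedef */

union sctp_sockstore {
    struct sockaddr_in sin;
    struct sockaddr_in6 sin6;
    struct sockaddr sa;
};

struct sctp_getaddresses {
    sctp_assoc_t sget_assoc_id;
    union sctp_sockstore addr[];        /* flexible array member */
};

static struct sctp_getaddresses *
alloc_getaddresses(sctp_assoc_t id, size_t naddrs)
{
    struct sctp_getaddresses *req;
    size_t len;

    /* One header plus naddrs slots, each big enough for an IPv4 or IPv6 address. */
    len = sizeof(*req) + naddrs * sizeof(union sctp_sockstore);
    req = malloc(len);
    if (req != NULL) {
        memset(req, 0, len);
        req->sget_assoc_id = id;
    }
    return (req);
}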
/***********************************/ /* And something for us old timers */ /***********************************/ -#ifndef __APPLE__ -#ifndef __Userspace__ +#if !(defined(__APPLE__) && !defined(__Userspace__)) +#if !defined(__Userspace__) #ifndef ntohll -#if defined(__Userspace_os_Linux) +#if defined(__linux__) #ifndef _BSD_SOURCE #define _BSD_SOURCE #endif @@ -1165,7 +1149,7 @@ union sctp_sockstore { #endif #ifndef htonll -#if defined(__Userspace_os_Linux) +#if defined(__linux__) #ifndef _BSD_SOURCE #define _BSD_SOURCE #endif @@ -1179,21 +1163,16 @@ union sctp_sockstore { #endif /***********************************/ - struct xsctp_inpcb { uint32_t last; uint32_t flags; -#if defined(__FreeBSD__) && __FreeBSD_version < 1000048 - uint32_t features; -#else uint64_t features; -#endif uint32_t total_sends; uint32_t total_recvs; uint32_t total_nospaces; uint32_t fragmentation_point; uint16_t local_port; -#if defined(__FreeBSD__) && __FreeBSD_version > 1100096 +#if defined(__FreeBSD__) && !defined(__Userspace__) uint16_t qlen_old; uint16_t maxqlen_old; #else @@ -1201,22 +1180,16 @@ struct xsctp_inpcb { uint16_t maxqlen; #endif uint16_t __spare16; -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) kvaddr_t socket; #else void *socket; #endif -#if defined(__FreeBSD__) && __FreeBSD_version > 1100096 +#if defined(__FreeBSD__) && !defined(__Userspace__) uint32_t qlen; uint32_t maxqlen; #endif -#if defined(__FreeBSD__) && __FreeBSD_version < 1000048 - uint32_t extra_padding[32]; /* future */ -#elif defined(__FreeBSD__) && (__FreeBSD_version < 1001517) - uint32_t extra_padding[31]; /* future */ -#else uint32_t extra_padding[26]; /* future */ -#endif }; struct xsctp_tcb { @@ -1245,18 +1218,9 @@ struct xsctp_tcb { uint16_t remote_port; /* sctpAssocEntry 4 */ struct sctp_timeval start_time; /* sctpAssocEntry 16 */ struct sctp_timeval discontinuity_time; /* sctpAssocEntry 17 */ -#if defined(__FreeBSD__) -#if __FreeBSD_version >= 800000 uint32_t peers_rwnd; sctp_assoc_t assoc_id; /* sctpAssocEntry 1 */ uint32_t extra_padding[32]; /* future */ -#else -#endif -#else - uint32_t peers_rwnd; - sctp_assoc_t assoc_id; /* sctpAssocEntry 1 */ - uint32_t extra_padding[32]; /* future */ -#endif }; struct xsctp_laddr { @@ -1281,23 +1245,12 @@ struct xsctp_raddr { uint8_t heartbeat_enabled; /* sctpAssocLocalRemEntry 4 */ uint8_t potentially_failed; struct sctp_timeval start_time; /* sctpAssocLocalRemEntry 8 */ -#if defined(__FreeBSD__) -#if __FreeBSD_version >= 800000 uint32_t rtt; uint32_t heartbeat_interval; uint32_t ssthresh; uint16_t encaps_port; uint16_t state; uint32_t extra_padding[29]; /* future */ -#endif -#else - uint32_t rtt; - uint32_t heartbeat_interval; - uint32_t ssthresh; - uint16_t encaps_port; - uint16_t state; - uint32_t extra_padding[29]; /* future */ -#endif }; #define SCTP_MAX_LOGGING_SIZE 30000 @@ -1324,19 +1277,14 @@ int sctp_lower_sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, -#if defined(__Panda__) - pakhandle_type i_pak, - pakhandle_type i_control, -#else struct mbuf *i_pak, struct mbuf *control, -#endif int flags, struct sctp_sndrcvinfo *srcv -#if !(defined(__Panda__) || defined(__Userspace__)) -#if defined(__FreeBSD__) && __FreeBSD_version >= 500000 +#if !defined(__Userspace__) +#if defined(__FreeBSD__) ,struct thread *p -#elif defined(__Windows__) +#elif defined(_WIN32) , PKTHREAD p #else ,struct proc *p @@ -1347,11 +1295,7 @@ sctp_lower_sosend(struct socket *so, int sctp_sorecvmsg(struct socket *so, struct uio *uio, -#if defined(__Panda__) - 
particletype **mp, -#else struct mbuf **mp, -#endif struct sockaddr *from, int fromlen, int *msg_flags, @@ -1365,45 +1309,6 @@ sctp_sorecvmsg(struct socket *so, #if !(defined(_KERNEL)) && !(defined(__Userspace__)) __BEGIN_DECLS -#if defined(__FreeBSD__) && __FreeBSD_version < 902000 -int sctp_peeloff __P((int, sctp_assoc_t)); -int sctp_bindx __P((int, struct sockaddr *, int, int)); -int sctp_connectx __P((int, const struct sockaddr *, int, sctp_assoc_t *)); -int sctp_getaddrlen __P((sa_family_t)); -int sctp_getpaddrs __P((int, sctp_assoc_t, struct sockaddr **)); -void sctp_freepaddrs __P((struct sockaddr *)); -int sctp_getladdrs __P((int, sctp_assoc_t, struct sockaddr **)); -void sctp_freeladdrs __P((struct sockaddr *)); -int sctp_opt_info __P((int, sctp_assoc_t, int, void *, socklen_t *)); - -/* deprecated */ -ssize_t sctp_sendmsg __P((int, const void *, size_t, const struct sockaddr *, - socklen_t, uint32_t, uint32_t, uint16_t, uint32_t, uint32_t)); - -/* deprecated */ -ssize_t sctp_send __P((int, const void *, size_t, - const struct sctp_sndrcvinfo *, int)); - -/* deprecated */ -ssize_t sctp_sendx __P((int, const void *, size_t, struct sockaddr *, - int, struct sctp_sndrcvinfo *, int)); - -/* deprecated */ -ssize_t sctp_sendmsgx __P((int sd, const void *, size_t, struct sockaddr *, - int, uint32_t, uint32_t, uint16_t, uint32_t, uint32_t)); - -sctp_assoc_t sctp_getassocid __P((int, struct sockaddr *)); - -/* deprecated */ -ssize_t sctp_recvmsg __P((int, void *, size_t, struct sockaddr *, socklen_t *, - struct sctp_sndrcvinfo *, int *)); - -ssize_t sctp_sendv __P((int, const struct iovec *, int, struct sockaddr *, - int, void *, socklen_t, unsigned int, int)); - -ssize_t sctp_recvv __P((int, const struct iovec *, int, struct sockaddr *, - socklen_t *, void *, socklen_t *, unsigned int *, int *)); -#else int sctp_peeloff(int, sctp_assoc_t); int sctp_bindx(int, struct sockaddr *, int, int); int sctp_connectx(int, const struct sockaddr *, int, sctp_assoc_t *); @@ -1441,7 +1346,6 @@ ssize_t sctp_sendv(int, const struct iovec *, int, struct sockaddr *, ssize_t sctp_recvv(int, const struct iovec *, int, struct sockaddr *, socklen_t *, void *, socklen_t *, unsigned int *, int *); -#endif __END_DECLS #endif /* !_KERNEL */ diff --git a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_userspace.c b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_userspace.c index 28922b6a1..41aff19e0 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_userspace.c +++ b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_userspace.c @@ -32,24 +32,24 @@ #include #include #if !defined(__MINGW32__) -#pragma comment(lib, "IPHLPAPI.lib") +#pragma comment(lib, "iphlpapi.lib") #endif #endif #include -#if defined(__Userspace_os_FreeBSD) +#if defined(__FreeBSD__) #include #endif -#if defined(__Userspace_os_Linux) +#if defined(__linux__) #include #endif -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) /* Adapter to translate Unix thread start routines to Windows thread start * routines. 
*/ #if defined(__MINGW32__) -#pragma GCC diagnostic push +#pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wpedantic" #endif static DWORD WINAPI @@ -83,42 +83,61 @@ sctp_userspace_thread_create(userland_thread_t *thread, start_routine_t start_ro void sctp_userspace_set_threadname(const char *name) { -#if defined(__Userspace_os_Darwin) +#if defined(__APPLE__) pthread_setname_np(name); #endif -#if defined(__Userspace_os_Linux) +#if defined(__linux__) prctl(PR_SET_NAME, name); #endif -#if defined(__Userspace_os_FreeBSD) +#if defined(__FreeBSD__) pthread_set_name_np(pthread_self(), name); #endif } -#if !defined(_WIN32) && !defined(__Userspace_os_NaCl) +#if !defined(_WIN32) && !defined(__native_client__) int sctp_userspace_get_mtu_from_ifn(uint32_t if_index, int af) { +#if defined(INET) || defined(INET6) struct ifreq ifr; int fd; +#endif + int mtu; - memset(&ifr, 0, sizeof(struct ifreq)); - if (if_indextoname(if_index, ifr.ifr_name) != NULL) { - /* TODO can I use the raw socket here and not have to open a new one with each query? */ - if ((fd = socket(af, SOCK_DGRAM, 0)) < 0) - return (0); - if (ioctl(fd, SIOCGIFMTU, &ifr) < 0) { + switch (af) { +#if defined(INET) + case AF_INET: +#endif +#if defined(INET6) + case AF_INET6: +#endif +#if defined(INET) || defined(INET6) + memset(&ifr, 0, sizeof(struct ifreq)); + mtu = 0; + if (if_indextoname(if_index, ifr.ifr_name) != NULL) { + /* TODO can I use the raw socket here and not have to open a new one with each query? */ + if ((fd = socket(af, SOCK_DGRAM, 0)) < 0) { + break; + } + if (ioctl(fd, SIOCGIFMTU, &ifr) >= 0) { + mtu = ifr.ifr_mtu; + } close(fd); - return (0); } - close(fd); - return ifr.ifr_mtu; - } else { - return (0); + break; +#endif + case AF_CONN: + mtu = 1280; + break; + default: + mtu = 0; + break; } + return (mtu); } #endif -#if defined(__Userspace_os_NaCl) +#if defined(__native_client__) int sctp_userspace_get_mtu_from_ifn(uint32_t if_index, int af) { @@ -126,7 +145,7 @@ sctp_userspace_get_mtu_from_ifn(uint32_t if_index, int af) } #endif -#if defined(__Userspace_os_Darwin) || defined(__Userspace_os_DragonFly) || defined(__Userspace_os_Linux) || defined(__Userspace_os_NaCl) || defined(__Userspace_os_NetBSD) || defined(__Userspace_os_Windows) || defined(__Userspace_os_Fuchsia) +#if defined(__APPLE__) || defined(__DragonFly__) || defined(__linux__) || defined(__native_client__) || defined(__NetBSD__) || defined(_WIN32) || defined(__Fuchsia__) || defined(__EMSCRIPTEN__) int timingsafe_bcmp(const void *b1, const void *b2, size_t n) { @@ -143,51 +162,88 @@ timingsafe_bcmp(const void *b1, const void *b2, size_t n) int sctp_userspace_get_mtu_from_ifn(uint32_t if_index, int af) { +#if defined(INET) || defined(INET6) PIP_ADAPTER_ADDRESSES pAdapterAddrs, pAdapt; DWORD AdapterAddrsSize, Err; - int ret; +#endif + int mtu; - ret = 0; - AdapterAddrsSize = 0; - pAdapterAddrs = NULL; - if ((Err = GetAdaptersAddresses(AF_UNSPEC, 0, NULL, NULL, &AdapterAddrsSize)) != 0) { - if ((Err != ERROR_BUFFER_OVERFLOW) && (Err != ERROR_INSUFFICIENT_BUFFER)) { - SCTPDBG(SCTP_DEBUG_USR, "GetAdaptersAddresses() sizing failed with error code %d, AdapterAddrsSize = %d\n", Err, AdapterAddrsSize); - ret = -1; + switch (af) { +#if defined(INET) + case AF_INET: +#endif +#if defined(INET6) + case AF_INET6: +#endif +#if defined(INET) || defined(INET6) + mtu = 0; + AdapterAddrsSize = 0; + pAdapterAddrs = NULL; + if ((Err = GetAdaptersAddresses(AF_UNSPEC, 0, NULL, NULL, &AdapterAddrsSize)) != 0) { + if ((Err != ERROR_BUFFER_OVERFLOW) && (Err != 
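The rewritten non-Windows sctp_userspace_get_mtu_from_ifn() above resolves the interface name from its index with if_indextoname(), opens a short-lived datagram socket, and asks the kernel for the MTU via a SIOCGIFMTU ioctl; AF_CONN simply reports 1280, the IPv6 minimum MTU. A minimal standalone sketch of that ioctl pattern (hypothetical names, error handling reduced to returning 0 like the patched function) could look like this:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>

/* Return the MTU of the interface with the given index, or 0 on failure.
 * On the BSDs, SIOCGIFMTU is declared in <sys/sockio.h> rather than <sys/ioctl.h>. */
int
query_mtu(unsigned int if_index, int af)
{
    struct ifreq ifr;
    int fd, mtu = 0;

    memset(&ifr, 0, sizeof(ifr));
    if (if_indextoname(if_index, ifr.ifr_name) == NULL) {
        return (0);
    }
    /* Any datagram socket of the right family serves as an ioctl handle. */
    if ((fd = socket(af, SOCK_DGRAM, 0)) < 0) {
        return (0);
    }
    if (ioctl(fd, SIOCGIFMTU, &ifr) >= 0) {
        mtu = ifr.ifr_mtu;
    }
    close(fd);
    return (mtu);
}

int
main(void)
{
    /* Interface index 1 is usually the loopback device. */
    printf("MTU of ifindex 1: %d\n", query_mtu(1, AF_INET));
    return (0);
}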
ERROR_INSUFFICIENT_BUFFER)) { + SCTPDBG(SCTP_DEBUG_USR, "GetAdaptersAddresses() sizing failed with error code %d, AdapterAddrsSize = %d\n", Err, AdapterAddrsSize); + mtu = -1; + goto cleanup; + } + } + if ((pAdapterAddrs = (PIP_ADAPTER_ADDRESSES) GlobalAlloc(GPTR, AdapterAddrsSize)) == NULL) { + SCTPDBG(SCTP_DEBUG_USR, "Memory allocation error!\n"); + mtu = -1; goto cleanup; } - } - if ((pAdapterAddrs = (PIP_ADAPTER_ADDRESSES) GlobalAlloc(GPTR, AdapterAddrsSize)) == NULL) { - SCTPDBG(SCTP_DEBUG_USR, "Memory allocation error!\n"); - ret = -1; - goto cleanup; - } - if ((Err = GetAdaptersAddresses(AF_UNSPEC, 0, NULL, pAdapterAddrs, &AdapterAddrsSize)) != ERROR_SUCCESS) { - SCTPDBG(SCTP_DEBUG_USR, "GetAdaptersAddresses() failed with error code %d\n", Err); - ret = -1; - goto cleanup; - } - for (pAdapt = pAdapterAddrs; pAdapt; pAdapt = pAdapt->Next) { - if (pAdapt->IfIndex == if_index) { - ret = pAdapt->Mtu; - break; + if ((Err = GetAdaptersAddresses(AF_UNSPEC, 0, NULL, pAdapterAddrs, &AdapterAddrsSize)) != ERROR_SUCCESS) { + SCTPDBG(SCTP_DEBUG_USR, "GetAdaptersAddresses() failed with error code %d\n", Err); + mtu = -1; + goto cleanup; } + for (pAdapt = pAdapterAddrs; pAdapt; pAdapt = pAdapt->Next) { + if (pAdapt->IfIndex == if_index) { + mtu = pAdapt->Mtu; + break; + } + } + cleanup: + if (pAdapterAddrs != NULL) { + GlobalFree(pAdapterAddrs); + } + break; +#endif + case AF_CONN: + mtu = 1280; + break; + default: + mtu = 0; + break; } -cleanup: - if (pAdapterAddrs != NULL) { - GlobalFree(pAdapterAddrs); - } - return (ret); + return (mtu); } void getwintimeofday(struct timeval *tv) { - struct timeb tb; + FILETIME filetime; + ULARGE_INTEGER ularge; - ftime(&tb); - tv->tv_sec = (long)tb.time; - tv->tv_usec = (long)(tb.millitm) * 1000L; + GetSystemTimeAsFileTime(&filetime); + ularge.LowPart = filetime.dwLowDateTime; + ularge.HighPart = filetime.dwHighDateTime; + /* Change base from Jan 1 1601 00:00:00 to Jan 1 1970 00:00:00 */ +#if defined(__MINGW32__) + ularge.QuadPart -= 116444736000000000ULL; +#else + ularge.QuadPart -= 116444736000000000UI64; +#endif + /* + * ularge.QuadPart is now the number of 100-nanosecond intervals + * since Jan 1 1970 00:00:00. + */ +#if defined(__MINGW32__) + tv->tv_sec = (long)(ularge.QuadPart / 10000000ULL); + tv->tv_usec = (long)((ularge.QuadPart % 10000000ULL) / 10ULL); +#else + tv->tv_sec = (long)(ularge.QuadPart / 10000000UI64); + tv->tv_usec = (long)((ularge.QuadPart % 10000000UI64) / 10UI64); +#endif } int diff --git a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_usrreq.c b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_usrreq.c index 6a19f62aa..24785d278 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_usrreq.c +++ b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_usrreq.c @@ -32,13 +32,13 @@ * THE POSSIBILITY OF SUCH DAMAGE. 
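getwintimeofday() now derives the timestamp from GetSystemTimeAsFileTime() instead of the obsolete ftime(): a FILETIME counts 100-nanosecond intervals since Jan 1 1601, so subtracting the 116444736000000000 intervals separating that epoch from Jan 1 1970 and scaling the remainder yields seconds and microseconds. The arithmetic in isolation, as a hypothetical helper with no Windows headers required:

#include <stdint.h>
#include <stdio.h>

/* 116444736000000000 is the number of 100-ns intervals between
 * Jan 1 1601 00:00:00 and Jan 1 1970 00:00:00. */
#define EPOCH_DIFF_100NS 116444736000000000ULL

void
filetime_to_unix(uint64_t filetime_100ns, int64_t *sec, int32_t *usec)
{
    uint64_t since_1970 = filetime_100ns - EPOCH_DIFF_100NS;

    *sec  = (int64_t)(since_1970 / 10000000ULL);           /* 10^7 100-ns units per second */
    *usec = (int32_t)((since_1970 % 10000000ULL) / 10ULL); /* 10 100-ns units per microsecond */
}

int
main(void)
{
    int64_t sec;
    int32_t usec;

    /* 116444736010000000 is exactly one second past the Unix epoch. */
    filetime_to_unix(116444736010000000ULL, &sec, &usec);
    printf("%lld.%06d\n", (long long)sec, usec);
    return (0);
}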
*/ -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) #include -__FBSDID("$FreeBSD: head/sys/netinet/sctp_usrreq.c 356270 2020-01-02 13:55:10Z tuexen $"); +__FBSDID("$FreeBSD$"); #endif #include -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) #include #endif #include @@ -61,18 +61,13 @@ __FBSDID("$FreeBSD: head/sys/netinet/sctp_usrreq.c 356270 2020-01-02 13:55:10Z t #else #include #endif -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) #include #endif - #if defined(HAVE_SCTP_PEELOFF_SOCKOPT) #include #endif /* HAVE_SCTP_PEELOFF_SOCKOPT */ -#if defined(__APPLE__) -#define APPLE_FILE_NO 7 -#endif - extern const struct sctp_cc_functions sctp_cc_functions[]; extern const struct sctp_ss_functions sctp_ss_functions[]; @@ -87,50 +82,28 @@ sctp_init(struct protosw *pp SCTP_UNUSED, struct domain *dp SCTP_UNUSED) sctp_init(void) #endif { -#if !defined(__Panda__) && !defined(__Userspace__) +#if !defined(__Userspace__) u_long sb_max_adj; +#else + init_random(); #endif /* Initialize and modify the sysctled variables */ sctp_init_sysctls(); #if defined(__Userspace__) -#if defined(__Userspace_os_Windows) || defined(__Userspace_os_NaCl) -#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION - srand(0); -#else - srand((unsigned int)time(NULL)); -#endif -#else -#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION - srandom(0); -#else - srandom(getpid()); /* so inp->sctp_ep.random_numbers are truly random... */ -#endif -#endif -#endif -#if defined(__Panda__) - sctp_sendspace = SB_MAX; - sctp_recvspace = SB_MAX; - -#elif defined(__Userspace__) SCTP_BASE_SYSCTL(sctp_udp_tunneling_port) = port; #else -#if !defined(__APPLE__) - if ((nmbclusters / 8) > SCTP_ASOC_MAX_CHUNKS_ON_QUEUE) - SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue) = (nmbclusters / 8); -#endif - /* - * Allow a user to take no more than 1/2 the number of clusters or - * the SB_MAX whichever is smaller for the send window. - */ -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) sb_max_adj = (u_long)((u_quad_t) (sb_max) * MCLBYTES / (MSIZE + MCLBYTES)); -#else - sb_max_adj = (u_long)((u_quad_t) (SB_MAX) * MCLBYTES / (MSIZE + MCLBYTES)); -#endif -#if defined(__APPLE__) SCTP_BASE_SYSCTL(sctp_sendspace) = sb_max_adj; #else + if ((nmbclusters / 8) > SCTP_ASOC_MAX_CHUNKS_ON_QUEUE) + SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue) = (nmbclusters / 8); + /* + * Allow a user to take no more than 1/2 the number of clusters or + * the SB_MAX, whichever is smaller, for the send window. 
+ */ + sb_max_adj = (u_long)((u_quad_t) (SB_MAX) * MCLBYTES / (MSIZE + MCLBYTES)); SCTP_BASE_SYSCTL(sctp_sendspace) = min(sb_max_adj, (((uint32_t)nmbclusters / 2) * SCTP_DEFAULT_MAXSEGMENT)); #endif @@ -144,7 +117,7 @@ sctp_init(void) SCTP_BASE_VAR(first_time) = 0; SCTP_BASE_VAR(sctp_pcb_initialized) = 0; #if defined(__Userspace__) -#if !defined(__Userspace_os_Windows) +#if !defined(_WIN32) #if defined(INET) || defined(INET6) SCTP_BASE_VAR(userspace_route) = -1; #endif @@ -161,11 +134,14 @@ sctp_init(void) SCTP_BASE_VAR(conn_output) = conn_output; SCTP_BASE_VAR(debug_printf) = debug_printf; SCTP_BASE_VAR(crc32c_offloaded) = 0; + SCTP_BASE_VAR(iterator_thread_started) = 0; + SCTP_BASE_VAR(timer_thread_started) = 0; #endif #if defined(__Userspace__) sctp_pcb_init(start_threads); - if (start_threads) - sctp_start_timer(); + if (start_threads) { + sctp_start_timer_thread(); + } #else sctp_pcb_init(); #endif @@ -174,22 +150,23 @@ sctp_init(void) SCTP_BASE_VAR(packet_log_end) = 0; memset(&SCTP_BASE_VAR(packet_log_buffer), 0, SCTP_PACKET_LOG_SIZE); #endif -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_BASE_VAR(sctp_main_timer_ticks) = 0; sctp_start_main_timer(); timeout(sctp_delayed_startup, NULL, 1); #endif -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) SCTP_BASE_VAR(eh_tag) = EVENTHANDLER_REGISTER(rt_addrmsg, sctp_addr_change_event_handler, NULL, EVENTHANDLER_PRI_FIRST); #endif } -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) #ifdef VIMAGE static void sctp_finish(void *unused __unused) { + EVENTHANDLER_DEREGISTER(rt_addrmsg, SCTP_BASE_VAR(eh_tag)); sctp_pcb_finish(); } VNET_SYSUNINIT(sctp, SI_SUB_PROTO_DOMAIN, SI_ORDER_FOURTH, sctp_finish, NULL); @@ -198,7 +175,7 @@ VNET_SYSUNINIT(sctp, SI_SUB_PROTO_DOMAIN, SI_ORDER_FOURTH, sctp_finish, NULL); void sctp_finish(void) { -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) untimeout(sctp_delayed_startup, NULL); sctp_over_udp_stop(); sctp_address_monitor_stop(); @@ -208,61 +185,15 @@ sctp_finish(void) #if defined(INET) || defined(INET6) recv_thread_destroy(); #endif -#if !defined(__Userspace_os_Windows) -#if defined(INET) || defined(INET6) - if (SCTP_BASE_VAR(userspace_route) != -1) { - pthread_join(SCTP_BASE_VAR(recvthreadroute), NULL); - } -#endif -#endif -#ifdef INET - if (SCTP_BASE_VAR(userspace_rawsctp) != -1) { -#if defined(__Userspace_os_Windows) - WaitForSingleObject(SCTP_BASE_VAR(recvthreadraw), INFINITE); - CloseHandle(SCTP_BASE_VAR(recvthreadraw)); -#else - pthread_join(SCTP_BASE_VAR(recvthreadraw), NULL); -#endif - } - if (SCTP_BASE_VAR(userspace_udpsctp) != -1) { -#if defined(__Userspace_os_Windows) - WaitForSingleObject(SCTP_BASE_VAR(recvthreadudp), INFINITE); - CloseHandle(SCTP_BASE_VAR(recvthreadudp)); -#else - pthread_join(SCTP_BASE_VAR(recvthreadudp), NULL); -#endif - } -#endif -#ifdef INET6 - if (SCTP_BASE_VAR(userspace_rawsctp6) != -1) { -#if defined(__Userspace_os_Windows) - WaitForSingleObject(SCTP_BASE_VAR(recvthreadraw6), INFINITE); - CloseHandle(SCTP_BASE_VAR(recvthreadraw6)); -#else - pthread_join(SCTP_BASE_VAR(recvthreadraw6), NULL); -#endif - } - if (SCTP_BASE_VAR(userspace_udpsctp6) != -1) { -#if defined(__Userspace_os_Windows) - WaitForSingleObject(SCTP_BASE_VAR(recvthreadudp6), INFINITE); - CloseHandle(SCTP_BASE_VAR(recvthreadudp6)); -#else - pthread_join(SCTP_BASE_VAR(recvthreadudp6), NULL); -#endif - } -#endif - atomic_cmpset_int(&SCTP_BASE_VAR(timer_thread_should_exit), 0, 1); -#if 
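With these changes the userspace build seeds its random numbers through init_random()/finish_random() and runs its timer via sctp_start_timer_thread()/sctp_stop_timer_thread(), all driven from sctp_init() and sctp_finish(). Applications reach that code only through the public entry points, roughly as in this sketch (passing 0 is assumed to mean no UDP encapsulation socket, and the loop relies on usrsctp_finish() returning non-zero while endpoints are still open):

#include <unistd.h>
#include <usrsctp.h>

int
main(void)
{
    /* Seeds the RNG and starts the timer/receive threads set up above. */
    usrsctp_init(0, NULL, NULL);

    /* ... usrsctp_socket(), usrsctp_connect(), data transfer ... */

    /* Poll usrsctp_finish() until all endpoints are gone; on success it
     * stops the timer thread and tears down the PCBs. */
    while (usrsctp_finish() != 0) {
        usleep(1000);
    }
    return (0);
}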
defined(__Userspace_os_Windows) - WaitForSingleObject(SCTP_BASE_VAR(timer_thread), INFINITE); - CloseHandle(SCTP_BASE_VAR(timer_thread)); -#else - pthread_join(SCTP_BASE_VAR(timer_thread), NULL); -#endif + sctp_stop_timer_thread(); #endif sctp_pcb_finish(); -#if defined(__Windows__) +#if defined(_WIN32) && !defined(__Userspace__) sctp_finish_sysctls(); #endif +#if defined(__Userspace__) + finish_random(); +#endif } #endif @@ -325,7 +256,7 @@ sctp_notify(struct sctp_inpcb *inp, uint16_t ip_len, uint32_t next_mtu) { -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) struct socket *so; #endif int timer_stopped; @@ -342,9 +273,7 @@ sctp_notify(struct sctp_inpcb *inp, (icmp_code == ICMP_UNREACH_ISOLATED) || (icmp_code == ICMP_UNREACH_NET_PROHIB) || (icmp_code == ICMP_UNREACH_HOST_PROHIB) || -#if defined(__Panda__) - (icmp_code == ICMP_UNREACH_ADMIN)) { -#elif defined(__Userspace_os_NetBSD) +#if defined(__NetBSD__) (icmp_code == ICMP_UNREACH_ADMIN_PROHIBIT)) { #else (icmp_code == ICMP_UNREACH_FILTER_PROHIB)) { @@ -362,8 +291,8 @@ sctp_notify(struct sctp_inpcb *inp, } else if ((icmp_code == ICMP_UNREACH_PROTOCOL) || (icmp_code == ICMP_UNREACH_PORT)) { /* Treat it like an ABORT. */ - sctp_abort_notification(stcb, 1, 0, NULL, SCTP_SO_NOT_LOCKED); -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) + sctp_abort_notification(stcb, true, false, 0, NULL, SCTP_SO_NOT_LOCKED); +#if defined(__APPLE__) && !defined(__Userspace__) so = SCTP_INP_SO(inp); atomic_add_int(&stcb->asoc.refcnt, 1); SCTP_TCB_UNLOCK(stcb); @@ -373,7 +302,7 @@ sctp_notify(struct sctp_inpcb *inp, #endif (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_2); -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_SOCKET_UNLOCK(so, 1); /* SCTP_TCB_UNLOCK(stcb); MT: I think this is not needed.*/ #endif @@ -407,7 +336,7 @@ sctp_notify(struct sctp_inpcb *inp, } if (net->mtu > next_mtu) { net->mtu = next_mtu; -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) if (net->port) { sctp_hc_set_mtu(&net->ro._l_addr, inp->fibnum, next_mtu + sizeof(struct udphdr)); } else { @@ -430,19 +359,15 @@ sctp_notify(struct sctp_inpcb *inp, } #endif -#if !defined(__Panda__) && !defined(__Userspace__) -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) +#if !defined(__Userspace__) void -#else -void * -#endif #if defined(__APPLE__) && !defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION) && !defined(APPLE_ELCAPITAN) sctp_ctlinput(int cmd, struct sockaddr *sa, void *vip, struct ifnet *ifp SCTP_UNUSED) #else sctp_ctlinput(int cmd, struct sockaddr *sa, void *vip) #endif { -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) struct ip *outer_ip; #endif struct ip *inner_ip; @@ -451,33 +376,25 @@ sctp_ctlinput(int cmd, struct sockaddr *sa, void *vip) struct sctp_inpcb *inp; struct sctp_tcb *stcb; struct sctp_nets *net; -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) struct sctp_init_chunk *ch; #endif struct sockaddr_in src, dst; if (sa->sa_family != AF_INET || ((struct sockaddr_in *)sa)->sin_addr.s_addr == INADDR_ANY) { -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) return; -#else - return (NULL); -#endif } if (PRC_IS_REDIRECT(cmd)) { vip = NULL; } else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) { -#if 
defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) return; -#else - return (NULL); -#endif } if (vip != NULL) { inner_ip = (struct ip *)vip; icmp = (struct icmp *)((caddr_t)inner_ip - (sizeof(struct icmp) - sizeof(struct ip))); -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip)); #endif sh = (struct sctphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2)); @@ -521,7 +438,7 @@ sctp_ctlinput(int cmd, struct sockaddr *sa, void *vip) return; } } else { -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) if (ntohs(outer_ip->ip_len) >= sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + 20) { @@ -548,7 +465,7 @@ sctp_ctlinput(int cmd, struct sockaddr *sa, void *vip) sctp_notify(inp, stcb, net, icmp->icmp_type, icmp->icmp_code, -#if defined(__FreeBSD__) && __FreeBSD_version >= 1000000 +#if defined(__FreeBSD__) && !defined(__Userspace__) ntohs(inner_ip->ip_len), #else inner_ip->ip_len, @@ -573,16 +490,6 @@ sctp_ctlinput(int cmd, struct sockaddr *sa, void *vip) } #endif } else { -#if defined(__FreeBSD__) && __FreeBSD_version < 500000 - /* - * XXX must be fixed for 5.x and higher, leave for - * 4.x - */ - if (PRC_IS_REDIRECT(cmd) && (inp != NULL)) { - in_rtchange((struct inpcb *)inp, - inetctlerrmap[cmd]); - } -#endif if ((stcb == NULL) && (inp != NULL)) { /* reduce ref-count */ SCTP_INP_WLOCK(inp); @@ -594,16 +501,12 @@ sctp_ctlinput(int cmd, struct sockaddr *sa, void *vip) } } } -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) return; -#else - return (NULL); -#endif } #endif #endif -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) static int sctp_getcred(SYSCTL_HANDLER_ARGS) { @@ -618,14 +521,8 @@ sctp_getcred(SYSCTL_HANDLER_ARGS) /* FIX, for non-bsd is this right? 
*/ vrf_id = SCTP_DEFAULT_VRFID; -#if __FreeBSD_version > 602000 error = priv_check(req->td, PRIV_NETINET_GETCRED); -#elif __FreeBSD_version >= 500000 - error = suser(req->td); -#else - error = suser(req->p); -#endif if (error) return (error); @@ -670,27 +567,30 @@ out: return (error); } -SYSCTL_PROC(_net_inet_sctp, OID_AUTO, getcred, CTLTYPE_OPAQUE | CTLFLAG_RW, - 0, 0, sctp_getcred, "S,ucred", "Get the ucred of a SCTP connection"); -#endif /* #if defined(__FreeBSD__) */ - +SYSCTL_PROC(_net_inet_sctp, OID_AUTO, getcred, + CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_NEEDGIANT, + 0, 0, sctp_getcred, "S,ucred", + "Get the ucred of a SCTP connection"); +#endif #ifdef INET -#if defined(__Panda__) || defined(__Windows__) || defined(__Userspace__) +#if defined(_WIN32) || defined(__Userspace__) int -#elif defined(__FreeBSD__) && __FreeBSD_version > 690000 +#elif defined(__FreeBSD__) static void #else static int #endif sctp_abort(struct socket *so) { +#if defined(__FreeBSD__) && !defined(__Userspace__) + struct epoch_tracker et; +#endif struct sctp_inpcb *inp; - uint32_t flags; inp = (struct sctp_inpcb *)so->so_pcb; if (inp == NULL) { -#if defined(__FreeBSD__) && __FreeBSD_version > 690000 +#if defined(__FreeBSD__) && !defined(__Userspace__) return; #else SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); @@ -698,16 +598,19 @@ sctp_abort(struct socket *so) #endif } - sctp_must_try_again: - flags = inp->sctp_flags; + SCTP_INP_WLOCK(inp); +#if defined(__FreeBSD__) && !defined(__Userspace__) + NET_EPOCH_ENTER(et); +#endif #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, NULL, 17); #endif - if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) && - (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) { + if (((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)) { + inp->sctp_flags |= SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP; #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, NULL, 16); #endif + SCTP_INP_WUNLOCK(inp); sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, SCTP_CALLED_AFTER_CMPSET_OFCLOSE); SOCK_LOCK(so); @@ -717,7 +620,7 @@ sctp_abort(struct socket *so) */ SCTP_SB_CLEAR(so->so_rcv); -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) so->so_usecount--; #else /* Now null out the reference, we are completely detached. 
*/ @@ -725,28 +628,25 @@ sctp_abort(struct socket *so) #endif SOCK_UNLOCK(so); } else { - flags = inp->sctp_flags; - if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { - goto sctp_must_try_again; - } + SCTP_INP_WUNLOCK(inp); } -#if defined(__FreeBSD__) && __FreeBSD_version > 690000 - return; +#if defined(__FreeBSD__) && !defined(__Userspace__) + NET_EPOCH_EXIT(et); #else return (0); #endif } -#if defined(__Panda__) || defined(__Userspace__) +#if defined(__Userspace__) int #else static int #endif -#if defined(__FreeBSD__) && __FreeBSD_version >= 500000 -sctp_attach(struct socket *so, int proto SCTP_UNUSED, struct thread *p SCTP_UNUSED) -#elif defined(__Panda__) || defined(__Userspace__) +#if defined(__Userspace__) sctp_attach(struct socket *so, int proto SCTP_UNUSED, uint32_t vrf_id) -#elif defined(__Windows__) +#elif defined(__FreeBSD__) +sctp_attach(struct socket *so, int proto SCTP_UNUSED, struct thread *p SCTP_UNUSED) +#elif defined(_WIN32) sctp_attach(struct socket *so, int proto SCTP_UNUSED, PKTHREAD p SCTP_UNUSED) #else sctp_attach(struct socket *so, int proto SCTP_UNUSED, struct proc *p SCTP_UNUSED) @@ -755,7 +655,7 @@ sctp_attach(struct socket *so, int proto SCTP_UNUSED, struct proc *p SCTP_UNUSED struct sctp_inpcb *inp; struct inpcb *ip_inp; int error; -#if !defined(__Panda__) && !defined(__Userspace__) +#if !defined(__Userspace__) uint32_t vrf_id = SCTP_DEFAULT_VRFID; #endif @@ -784,18 +684,18 @@ sctp_attach(struct socket *so, int proto SCTP_UNUSED, struct proc *p SCTP_UNUSED return (0); } -#if defined(__FreeBSD__) && __FreeBSD_version >= 500000 -static int -sctp_bind(struct socket *so, struct sockaddr *addr, struct thread *p) -{ -#elif defined(__FreeBSD__) || defined(__APPLE__) -static int -sctp_bind(struct socket *so, struct sockaddr *addr, struct proc *p) { -#elif defined(__Panda__) || defined(__Userspace__) +#if defined(__Userspace__) int sctp_bind(struct socket *so, struct sockaddr *addr) { void *p = NULL; -#elif defined(__Windows__) +#elif defined(__FreeBSD__) +static int +sctp_bind(struct socket *so, struct sockaddr *addr, struct thread *p) +{ +#elif defined(__APPLE__) +static int +sctp_bind(struct socket *so, struct sockaddr *addr, struct proc *p) { +#elif defined(_WIN32) static int sctp_bind(struct socket *so, struct sockaddr *addr, PKTHREAD p) { #else @@ -887,12 +787,14 @@ sctpconn_bind(struct socket *so, struct sockaddr *addr) } #endif -#if (defined(__FreeBSD__) && __FreeBSD_version > 690000) || defined(__Windows__) || defined(__Userspace__) +#if defined(__FreeBSD__) || defined(_WIN32) || defined(__Userspace__) void sctp_close(struct socket *so) { +#if defined(__FreeBSD__) && !defined(__Userspace__) + struct epoch_tracker et; +#endif struct sctp_inpcb *inp; - uint32_t flags; inp = (struct sctp_inpcb *)so->so_pcb; if (inp == NULL) @@ -901,13 +803,15 @@ sctp_close(struct socket *so) /* Inform all the lower layer assoc that we * are done. 
*/ - sctp_must_try_again: - flags = inp->sctp_flags; + SCTP_INP_WLOCK(inp); +#if defined(__FreeBSD__) && !defined(__Userspace__) + NET_EPOCH_ENTER(et); +#endif #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, NULL, 17); #endif - if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) && - (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) { + if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { + inp->sctp_flags |= SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP; #if defined(__Userspace__) if (((so->so_options & SCTP_SO_LINGER) && (so->so_linger == 0)) || (so->so_rcv.sb_cc > 0)) { @@ -918,12 +822,14 @@ sctp_close(struct socket *so) #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, NULL, 13); #endif + SCTP_INP_WUNLOCK(inp); sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, SCTP_CALLED_AFTER_CMPSET_OFCLOSE); } else { #ifdef SCTP_LOG_CLOSING sctp_log_closing(inp, NULL, 14); #endif + SCTP_INP_WUNLOCK(inp); sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_GRACEFUL_CLOSE, SCTP_CALLED_AFTER_CMPSET_OFCLOSE); } @@ -937,23 +843,19 @@ sctp_close(struct socket *so) */ SCTP_SB_CLEAR(so->so_rcv); -#if !defined(__APPLE__) +#if !(defined(__APPLE__) && !defined(__Userspace__)) /* Now null out the reference, we are completely detached. */ so->so_pcb = NULL; #endif SOCK_UNLOCK(so); } else { - flags = inp->sctp_flags; - if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { - goto sctp_must_try_again; - } + SCTP_INP_WUNLOCK(inp); } - return; +#if defined(__FreeBSD__) && !defined(__Userspace__) + NET_EPOCH_EXIT(et); +#endif } - #else - - int sctp_detach(struct socket *so) { @@ -962,7 +864,7 @@ sctp_detach(struct socket *so) inp = (struct sctp_inpcb *)so->so_pcb; if (inp == NULL) { -#if defined(__FreeBSD__) && __FreeBSD_version > 690000 +#if defined(__FreeBSD__) && !defined(__Userspace__) return; #else SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); @@ -1003,7 +905,7 @@ sctp_detach(struct socket *so) * here for the accounting/select. 
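In the sctp_close() path above, SO_LINGER with a zero linger time (or unread data left in the receive buffer) selects SCTP_FREE_SHOULD_USE_ABORT, while everything else takes the graceful route. Seen from the application side of the userspace stack the choice is made before closing, along these lines (a sketch that assumes usrsctp_setsockopt() accepts the usual SOL_SOCKET/SO_LINGER pair, which is what the so_options & SCTP_SO_LINGER test above consumes):

#include <string.h>
#include <sys/socket.h>
#include <usrsctp.h>

/* Abortive close: with SO_LINGER on and a zero linger time, the sctp_close()
 * path above frees the association with an ABORT rather than a graceful
 * SHUTDOWN. `so` is assumed to be an open usrsctp socket. */
void
close_abortive(struct socket *so)
{
    struct linger l;

    memset(&l, 0, sizeof(l));
    l.l_onoff = 1;
    l.l_linger = 0;
    (void)usrsctp_setsockopt(so, SOL_SOCKET, SO_LINGER, &l, sizeof(l));
    usrsctp_close(so);
}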
*/ SCTP_SB_CLEAR(so->so_rcv); -#if !defined(__APPLE__) +#if !(defined(__APPLE__) && !defined(__Userspace__)) /* Now disconnect */ so->so_pcb = NULL; #endif @@ -1013,7 +915,7 @@ sctp_detach(struct socket *so) goto sctp_must_try_again; } } -#if defined(__FreeBSD__) && __FreeBSD_version > 690000 +#if defined(__FreeBSD__) && !defined(__Userspace__) return; #else return (0); @@ -1024,9 +926,9 @@ sctp_detach(struct socket *so) #if defined(__Userspace__) /* __Userspace__ is not calling sctp_sendm */ #endif -#if !(defined(__Panda__) || defined(__Windows__)) +#if !(defined(_WIN32) && !defined(__Userspace__)) int -#if defined(__FreeBSD__) && __FreeBSD_version >= 500000 +#if defined(__FreeBSD__) && !defined(__Userspace__) sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, struct mbuf *control, struct thread *p); @@ -1035,9 +937,8 @@ sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, struct mbuf *control, struct proc *p); #endif - int -#if defined(__FreeBSD__) && __FreeBSD_version >= 500000 +#if defined(__FreeBSD__) && !defined(__Userspace__) sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, struct mbuf *control, struct thread *p) { @@ -1064,9 +965,22 @@ sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) || (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE))) { goto connected_type; - } else if (addr == NULL) { + } + + error = 0; + if (addr == NULL) { SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EDESTADDRREQ); error = EDESTADDRREQ; + } else if (addr->sa_family != AF_INET) { + SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EAFNOSUPPORT); + error = EAFNOSUPPORT; +#if defined(HAVE_SA_LEN) + } else if (addr->sa_len != sizeof(struct sockaddr_in)) { + SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); + error = EINVAL; +#endif + } + if (error != 0) { sctp_m_freem(m); if (control) { sctp_m_freem(control); @@ -1074,19 +988,6 @@ sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, } return (error); } -#ifdef INET6 - if (addr->sa_family != AF_INET) { - /* must be a v4 address! */ - SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EDESTADDRREQ); - sctp_m_freem(m); - if (control) { - sctp_m_freem(control); - control = NULL; - } - error = EDESTADDRREQ; - return (error); - } -#endif /* INET6 */ connected_type: /* now what about control */ if (control) { @@ -1104,7 +1005,7 @@ connected_type: inp->pkt_last = inp->pkt = m; } if ( -#if defined(__FreeBSD__) || defined(__APPLE__) +#if (defined(__FreeBSD__) || defined(__APPLE__)) && !defined(__Userspace__) /* FreeBSD uses a flag passed */ ((flags & PRUS_MORETOCOME) == 0) #else @@ -1120,9 +1021,18 @@ connected_type: * definitions) but this is not advisable. This code is used * by FreeBSD when sending a file with sendfile() though. 
*/ +#if defined(__FreeBSD__) && !defined(__Userspace__) + struct epoch_tracker et; +#endif int ret; +#if defined(__FreeBSD__) && !defined(__Userspace__) + NET_EPOCH_ENTER(et); +#endif ret = sctp_output(inp, inp->pkt, addr, inp->control, p, flags); +#if defined(__FreeBSD__) && !defined(__Userspace__) + NET_EPOCH_EXIT(et); +#endif inp->pkt = NULL; inp->control = NULL; return (ret); @@ -1150,6 +1060,9 @@ sctp_disconnect(struct socket *so) SCTP_INP_RUNLOCK(inp); return (0); } else { +#if defined(__FreeBSD__) && !defined(__Userspace__) + struct epoch_tracker et; +#endif struct sctp_association *asoc; struct sctp_tcb *stcb; @@ -1167,6 +1080,9 @@ sctp_disconnect(struct socket *so) SCTP_INP_RUNLOCK(inp); return (0); } +#if defined(__FreeBSD__) && !defined(__Userspace__) + NET_EPOCH_ENTER(et); +#endif #if defined(__Userspace__) if (((so->so_options & SCTP_SO_LINGER) && (so->so_linger == 0)) || @@ -1192,6 +1108,9 @@ sctp_disconnect(struct socket *so) (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_3); /* No unlock tcb assoc is gone */ +#if defined(__FreeBSD__) && !defined(__Userspace__) + NET_EPOCH_EXIT(et); +#endif return (0); } if (TAILQ_EMPTY(&asoc->send_queue) && @@ -1219,9 +1138,9 @@ sctp_disconnect(struct socket *so) } sctp_send_shutdown(stcb,netp); sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, - stcb->sctp_ep, stcb, netp); + stcb->sctp_ep, stcb, netp); sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, - stcb->sctp_ep, stcb, netp); + stcb->sctp_ep, stcb, NULL); sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_LOCKED); } } else { @@ -1235,16 +1154,8 @@ sctp_disconnect(struct socket *so) * we will allow user data to be sent first * and move to SHUTDOWN-PENDING */ - struct sctp_nets *netp; - if (stcb->asoc.alternate) { - netp = stcb->asoc.alternate; - } else { - netp = stcb->asoc.primary_destination; - } - SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING); - sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, - netp); + sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, NULL); if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc)) { SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT); } @@ -1264,12 +1175,18 @@ sctp_disconnect(struct socket *so) SCTP_INP_RUNLOCK(inp); (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_5); +#if defined(__FreeBSD__) && !defined(__Userspace__) + NET_EPOCH_EXIT(et); +#endif return (0); } else { sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED); } } soisdisconnecting(so); +#if defined(__FreeBSD__) && !defined(__Userspace__) + NET_EPOCH_EXIT(et); +#endif SCTP_TCB_UNLOCK(stcb); SCTP_INP_RUNLOCK(inp); return (0); @@ -1283,7 +1200,7 @@ sctp_disconnect(struct socket *so) } } -#if defined(__FreeBSD__) || defined(__Windows__) || defined(__Userspace__) +#if defined(__FreeBSD__) || defined(_WIN32) || defined(__Userspace__) int sctp_flush(struct socket *so, int how) { @@ -1328,7 +1245,6 @@ sctp_flush(struct socket *so, int how) so->so_snd.sb_cc = 0; so->so_snd.sb_mbcnt = 0; so->so_snd.sb_mb = NULL; - } return (0); } @@ -1349,7 +1265,7 @@ sctp_shutdown(struct socket *so) if (!((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { /* Restore the flags that the soshutdown took away. 
*/ -#if (defined(__FreeBSD__) && __FreeBSD_version >= 502115) || defined(__Windows__) +#if (defined(__FreeBSD__) || defined(_WIN32)) && !defined(__Userspace__) SOCKBUF_LOCK(&so->so_rcv); so->so_rcv.sb_state &= ~SBS_CANTRCVMORE; SOCKBUF_UNLOCK(&so->so_rcv); @@ -1368,6 +1284,9 @@ sctp_shutdown(struct socket *so) * a SHUT_WR or SHUT_RDWR. * This means we put the shutdown flag against it. */ +#if defined(__FreeBSD__) && !defined(__Userspace__) + struct epoch_tracker et; +#endif struct sctp_tcb *stcb; struct sctp_association *asoc; struct sctp_nets *netp; @@ -1406,6 +1325,9 @@ sctp_shutdown(struct socket *so) SCTP_INP_RUNLOCK(inp); return (0); } +#if defined(__FreeBSD__) && !defined(__Userspace__) + NET_EPOCH_ENTER(et); +#endif if (stcb->asoc.alternate) { netp = stcb->asoc.alternate; } else { @@ -1443,15 +1365,21 @@ sctp_shutdown(struct socket *so) stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6; SCTP_INP_RUNLOCK(inp); sctp_abort_an_association(stcb->sctp_ep, stcb, - op_err, SCTP_SO_LOCKED); + op_err, false, SCTP_SO_LOCKED); +#if defined(__FreeBSD__) && !defined(__Userspace__) + NET_EPOCH_EXIT(et); +#endif return (0); } } - sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, netp); + sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, NULL); /* XXX: Why do this in the case where we have still data queued? */ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED); SCTP_TCB_UNLOCK(stcb); SCTP_INP_RUNLOCK(inp); +#if defined(__FreeBSD__) && !defined(__Userspace__) + NET_EPOCH_EXIT(et); +#endif return (0); } } @@ -1461,33 +1389,33 @@ sctp_shutdown(struct socket *so) * returns 0 on success, 1 on error */ static uint32_t -sctp_fill_user_address(struct sockaddr_storage *ss, struct sockaddr *sa) +sctp_fill_user_address(struct sockaddr *dst, struct sockaddr *src) { #ifdef INET6 #if defined(SCTP_EMBEDDED_V6_SCOPE) struct sockaddr_in6 lsa6; - sa = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)sa, + src = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)src, &lsa6); #endif #endif #ifdef HAVE_SA_LEN - memcpy(ss, sa, sa->sa_len); + memcpy(dst, src, src->sa_len); #else - switch (sa->sa_family) { + switch (src->sa_family) { #ifdef INET case AF_INET: - memcpy(ss, sa, sizeof(struct sockaddr_in)); + memcpy(dst, src, sizeof(struct sockaddr_in)); break; #endif #ifdef INET6 case AF_INET6: - memcpy(ss, sa, sizeof(struct sockaddr_in6)); + memcpy(dst, src, sizeof(struct sockaddr_in6)); break; #endif #if defined(__Userspace__) case AF_CONN: - memcpy(ss, sa, sizeof(struct sockaddr_conn)); + memcpy(dst, src, sizeof(struct sockaddr_conn)); break; #endif default: @@ -1498,17 +1426,12 @@ sctp_fill_user_address(struct sockaddr_storage *ss, struct sockaddr *sa) return (0); } - - -/* - * NOTE: assumes addr lock is held - */ static size_t sctp_fill_up_addresses_vrf(struct sctp_inpcb *inp, - struct sctp_tcb *stcb, - size_t limit, - struct sockaddr_storage *sas, - uint32_t vrf_id) + struct sctp_tcb *stcb, + size_t limit, + struct sockaddr *addr, + uint32_t vrf_id) { struct sctp_ifn *sctp_ifn; struct sctp_ifa *sctp_ifa; @@ -1525,8 +1448,9 @@ sctp_fill_up_addresses_vrf(struct sctp_inpcb *inp, #endif struct sctp_vrf *vrf; + SCTP_IPI_ADDR_LOCK_ASSERT(); actual = 0; - if (limit <= 0) + if (limit == 0) return (actual); if (stcb) { @@ -1613,7 +1537,7 @@ sctp_fill_up_addresses_vrf(struct sctp_inpcb *inp, * is one of those we must skip it. 
*/ if (sctp_is_addr_restricted(stcb, - sctp_ifa)) { + sctp_ifa)) { continue; } } @@ -1631,7 +1555,7 @@ sctp_fill_up_addresses_vrf(struct sctp_inpcb *inp, */ continue; } -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) if (prison_check_ip4(inp->ip_inp.inp.inp_cred, &sin->sin_addr) != 0) { continue; @@ -1646,18 +1570,18 @@ sctp_fill_up_addresses_vrf(struct sctp_inpcb *inp, if (actual + sizeof(struct sockaddr_in6) > limit) { return (actual); } - in6_sin_2_v4mapsin6(sin, (struct sockaddr_in6 *)sas); - ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport; - sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(struct sockaddr_in6)); + in6_sin_2_v4mapsin6(sin, (struct sockaddr_in6 *)addr); + ((struct sockaddr_in6 *)addr)->sin6_port = inp->sctp_lport; + addr = (struct sockaddr *)((caddr_t)addr + sizeof(struct sockaddr_in6)); actual += sizeof(struct sockaddr_in6); } else { #endif if (actual + sizeof(struct sockaddr_in) > limit) { return (actual); } - memcpy(sas, sin, sizeof(struct sockaddr_in)); - ((struct sockaddr_in *)sas)->sin_port = inp->sctp_lport; - sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(struct sockaddr_in)); + memcpy(addr, sin, sizeof(struct sockaddr_in)); + ((struct sockaddr_in *)addr)->sin_port = inp->sctp_lport; + addr = (struct sockaddr *)((caddr_t)addr + sizeof(struct sockaddr_in)); actual += sizeof(struct sockaddr_in); #ifdef INET6 } @@ -1683,7 +1607,7 @@ sctp_fill_up_addresses_vrf(struct sctp_inpcb *inp, */ continue; } -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) if (prison_check_ip6(inp->ip_inp.inp.inp_cred, &sin6->sin6_addr) != 0) { continue; @@ -1725,9 +1649,9 @@ sctp_fill_up_addresses_vrf(struct sctp_inpcb *inp, if (actual + sizeof(struct sockaddr_in6) > limit) { return (actual); } - memcpy(sas, sin6, sizeof(struct sockaddr_in6)); - ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport; - sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(struct sockaddr_in6)); + memcpy(addr, sin6, sizeof(struct sockaddr_in6)); + ((struct sockaddr_in6 *)addr)->sin6_port = inp->sctp_lport; + addr = (struct sockaddr *)((caddr_t)addr + sizeof(struct sockaddr_in6)); actual += sizeof(struct sockaddr_in6); } else { continue; @@ -1740,9 +1664,9 @@ sctp_fill_up_addresses_vrf(struct sctp_inpcb *inp, if (actual + sizeof(struct sockaddr_conn) > limit) { return (actual); } - memcpy(sas, &sctp_ifa->address.sconn, sizeof(struct sockaddr_conn)); - ((struct sockaddr_conn *)sas)->sconn_port = inp->sctp_lport; - sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(struct sockaddr_conn)); + memcpy(addr, &sctp_ifa->address.sconn, sizeof(struct sockaddr_conn)); + ((struct sockaddr_conn *)addr)->sconn_port = inp->sctp_lport; + addr = (struct sockaddr *)((caddr_t)addr + sizeof(struct sockaddr_conn)); actual += sizeof(struct sockaddr_conn); } else { continue; @@ -1792,29 +1716,29 @@ sctp_fill_up_addresses_vrf(struct sctp_inpcb *inp, if (actual + sa_len > limit) { return (actual); } - if (sctp_fill_user_address(sas, &laddr->ifa->address.sa)) + if (sctp_fill_user_address(addr, &laddr->ifa->address.sa)) continue; switch (laddr->ifa->address.sa.sa_family) { #ifdef INET case AF_INET: - ((struct sockaddr_in *)sas)->sin_port = inp->sctp_lport; + ((struct sockaddr_in *)addr)->sin_port = inp->sctp_lport; break; #endif #ifdef INET6 case AF_INET6: - ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport; + ((struct sockaddr_in6 *)addr)->sin6_port = inp->sctp_lport; break; #endif #if defined(__Userspace__) case AF_CONN: - ((struct 
sockaddr_conn *)sas)->sconn_port = inp->sctp_lport; + ((struct sockaddr_conn *)addr)->sconn_port = inp->sctp_lport; break; #endif default: /* TSNH */ break; } - sas = (struct sockaddr_storage *)((caddr_t)sas + sa_len); + addr = (struct sockaddr *)((caddr_t)addr + sa_len); actual += sa_len; } } @@ -1825,7 +1749,7 @@ static size_t sctp_fill_up_addresses(struct sctp_inpcb *inp, struct sctp_tcb *stcb, size_t limit, - struct sockaddr_storage *sas) + struct sockaddr *addr) { size_t size = 0; #ifdef SCTP_MVRF @@ -1840,22 +1764,19 @@ sctp_fill_up_addresses(struct sctp_inpcb *inp, */ /* fill up addresses for all VRFs on the endpoint */ for (id = 0; (id < inp->num_vrfs) && (size < limit); id++) { - size += sctp_fill_up_addresses_vrf(inp, stcb, limit, sas, - inp->m_vrf_ids[id]); - sas = (struct sockaddr_storage *)((caddr_t)sas + size); + size += sctp_fill_up_addresses_vrf(inp, stcb, limit, addr, + inp->m_vrf_ids[id]); + addr = (struct sockaddr *)((caddr_t)addr + size); } #else /* fill up addresses for the endpoint's default vrf */ - size = sctp_fill_up_addresses_vrf(inp, stcb, limit, sas, - inp->def_vrf_id); + size = sctp_fill_up_addresses_vrf(inp, stcb, limit, addr, + inp->def_vrf_id); #endif SCTP_IPI_ADDR_RUNLOCK(); return (size); } -/* - * NOTE: assumes addr lock is held - */ static int sctp_count_max_addresses_vrf(struct sctp_inpcb *inp, uint32_t vrf_id) { @@ -1869,6 +1790,7 @@ sctp_count_max_addresses_vrf(struct sctp_inpcb *inp, uint32_t vrf_id) * bound-all case a TCB may NOT include the loopback or other * addresses as well. */ + SCTP_IPI_ADDR_LOCK_ASSERT(); vrf = sctp_find_vrf(vrf_id); if (vrf == NULL) { return (0); @@ -2056,28 +1978,22 @@ sctp_do_connect_x(struct socket *so, struct sctp_inpcb *inp, void *optval, /* FIX ME: do we want to pass in a vrf on the connect call? */ vrf_id = inp->def_vrf_id; - /* We are GOOD to go */ - stcb = sctp_aloc_assoc(inp, sa, &error, 0, vrf_id, - inp->sctp_ep.pre_open_stream_count, - inp->sctp_ep.port, -#if defined(__FreeBSD__) && __FreeBSD_version >= 500000 - (struct thread *)p, -#elif defined(__Windows__) - (PKTHREAD)p, + stcb = sctp_aloc_assoc_connected(inp, sa, &error, 0, 0, vrf_id, + inp->sctp_ep.pre_open_stream_count, + inp->sctp_ep.port, +#if defined(__FreeBSD__) && !defined(__Userspace__) + (struct thread *)p, +#elif defined(_WIN32) && !defined(__Userspace__) + (PKTHREAD)p, #else - (struct proc *)p, + (struct proc *)p, #endif - SCTP_INITIALIZE_AUTH_PARAMS); + SCTP_INITIALIZE_AUTH_PARAMS); if (stcb == NULL) { /* Gak! 
no memory */ goto out_now; } - if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { - stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED; - /* Set the connected flag so we can queue data */ - soisconnecting(so); - } SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT); /* move to second address */ switch (sa->sa_family) { @@ -2142,7 +2058,6 @@ sctp_do_connect_x(struct socket *so, struct sctp_inpcb *inp, void *optval, } \ } - #define SCTP_CHECK_AND_CAST(destp, srcp, type, size) {\ if (size < sizeof(type)) { \ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); \ @@ -2153,7 +2068,7 @@ sctp_do_connect_x(struct socket *so, struct sctp_inpcb *inp, void *optval, } \ } -#if defined(__Panda__) || defined(__Userspace__) +#if defined(__Userspace__) int #else static int @@ -2213,7 +2128,7 @@ sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize, break; case SCTP_AUTOCLOSE: if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) - val = TICKS_TO_SEC(inp->sctp_ep.auto_close_time); + val = sctp_ticks_to_secs(inp->sctp_ep.auto_close_time); else val = 0; break; @@ -2643,7 +2558,7 @@ sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize, ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (sack->sack_assoc_id == SCTP_FUTURE_ASSOC))) { SCTP_INP_RLOCK(inp); - sack->sack_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]); + sack->sack_delay = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]); sack->sack_freq = inp->sctp_ep.sctp_sack_freq; SCTP_INP_RUNLOCK(inp); } else { @@ -2677,20 +2592,6 @@ sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize, } case SCTP_MAX_BURST: { -#if defined(__FreeBSD__) && __FreeBSD_version < 900000 - uint8_t *value; - - SCTP_CHECK_AND_CAST(value, optval, uint8_t, *optsize); - - SCTP_INP_RLOCK(inp); - if (inp->sctp_ep.max_burst < 256) { - *value = inp->sctp_ep.max_burst; - } else { - *value = 255; - } - SCTP_INP_RUNLOCK(inp); - *optsize = sizeof(uint8_t); -#else struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); @@ -2715,7 +2616,6 @@ sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize, if (error == 0) { *optsize = sizeof(struct sctp_assoc_value); } -#endif break; } case SCTP_MAXSEG: @@ -2893,7 +2793,7 @@ sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize, */ { size_t cpsz, left; - struct sockaddr_storage *sas; + struct sockaddr *addr; struct sctp_nets *net; struct sctp_getaddresses *saddr; @@ -2901,9 +2801,9 @@ sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize, SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id); if (stcb) { - left = (*optsize) - sizeof(sctp_assoc_t); - *optsize = sizeof(sctp_assoc_t); - sas = (struct sockaddr_storage *)&saddr->addr[0]; + left = *optsize - offsetof(struct sctp_getaddresses, addr); + *optsize = offsetof(struct sctp_getaddresses, addr); + addr = &saddr->addr[0].sa; TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { switch (net->ro._l_addr.sa.sa_family) { @@ -2946,16 +2846,16 @@ sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize, (net->ro._l_addr.sa.sa_family == AF_INET)) { /* Must map the address */ in6_sin_2_v4mapsin6(&net->ro._l_addr.sin, - (struct sockaddr_in6 *)sas); + (struct sockaddr_in6 *)addr); } else { - memcpy(sas, &net->ro._l_addr, cpsz); + memcpy(addr, &net->ro._l_addr, cpsz); } #else - memcpy(sas, &net->ro._l_addr, cpsz); + memcpy(addr, &net->ro._l_addr, cpsz); #endif - ((struct sockaddr_in 
*)sas)->sin_port = stcb->rport; + ((struct sockaddr_in *)addr)->sin_port = stcb->rport; - sas = (struct sockaddr_storage *)((caddr_t)sas + cpsz); + addr = (struct sockaddr *)((caddr_t)addr + cpsz); left -= cpsz; *optsize += cpsz; } @@ -2969,19 +2869,17 @@ sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize, case SCTP_GET_LOCAL_ADDRESSES: { size_t limit, actual; - struct sockaddr_storage *sas; struct sctp_getaddresses *saddr; SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize); SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id); - sas = (struct sockaddr_storage *)&saddr->addr[0]; - limit = *optsize - sizeof(sctp_assoc_t); - actual = sctp_fill_up_addresses(inp, stcb, limit, sas); + limit = *optsize - offsetof(struct sctp_getaddresses, addr); + actual = sctp_fill_up_addresses(inp, stcb, limit, &saddr->addr[0].sa); if (stcb) { SCTP_TCB_UNLOCK(stcb); } - *optsize = sizeof(sctp_assoc_t) + actual; + *optsize = offsetof(struct sctp_getaddresses, addr) + actual; break; } case SCTP_PEER_ADDR_PARAMS: @@ -3164,7 +3062,7 @@ sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize, /* Use endpoint defaults */ SCTP_INP_RLOCK(inp); paddrp->spp_pathmaxrxt = inp->sctp_ep.def_net_failure; - paddrp->spp_hbinterval = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]); + paddrp->spp_hbinterval = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]); paddrp->spp_assoc_id = SCTP_FUTURE_ASSOC; /* get inp's default */ if (inp->sctp_ep.default_dscp & 0x01) { @@ -3323,8 +3221,6 @@ sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize, */ sstat->sstat_penddata = (stcb->asoc.cnt_on_reasm_queue + stcb->asoc.cnt_on_all_streams); - - sstat->sstat_instrms = stcb->asoc.streamincnt; sstat->sstat_outstrms = stcb->asoc.streamoutcnt; sstat->sstat_fragmentation_point = sctp_get_frag_point(stcb, &stcb->asoc); @@ -3332,17 +3228,33 @@ sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize, if (net != NULL) { #ifdef HAVE_SA_LEN memcpy(&sstat->sstat_primary.spinfo_address, - &stcb->asoc.primary_destination->ro._l_addr, - ((struct sockaddr *)(&stcb->asoc.primary_destination->ro._l_addr))->sa_len); + &net->ro._l_addr, + ((struct sockaddr *)(&net->ro._l_addr))->sa_len); #else - if (stcb->asoc.primary_destination->ro._l_addr.sa.sa_family == AF_INET) { + switch (stcb->asoc.primary_destination->ro._l_addr.sa.sa_family) { +#if defined(INET) + case AF_INET: memcpy(&sstat->sstat_primary.spinfo_address, - &stcb->asoc.primary_destination->ro._l_addr, + &net->ro._l_addr, sizeof(struct sockaddr_in)); - } else { + break; +#endif +#if defined(INET6) + case AF_INET6: memcpy(&sstat->sstat_primary.spinfo_address, - &stcb->asoc.primary_destination->ro._l_addr, + &net->ro._l_addr, sizeof(struct sockaddr_in6)); + break; +#endif +#if defined(__Userspace__) + case AF_CONN: + memcpy(&sstat->sstat_primary.spinfo_address, + &net->ro._l_addr, + sizeof(struct sockaddr_conn)); + break; +#endif + default: + break; } #endif ((struct sockaddr_in *)&sstat->sstat_primary.spinfo_address)->sin_port = stcb->rport; @@ -3454,7 +3366,7 @@ sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize, SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id); if (stcb) { - sasoc->sasoc_cookie_life = TICKS_TO_MSEC(stcb->asoc.cookie_life); + sasoc->sasoc_cookie_life = sctp_ticks_to_msecs(stcb->asoc.cookie_life); sasoc->sasoc_asocmaxrxt = stcb->asoc.max_send_times; sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets; 
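The address-list getters above now size their buffers with offsetof(struct sctp_getaddresses, addr) instead of subtracting sizeof(sctp_assoc_t); the two are only equivalent when the compiler inserts no padding between the association id and the address array. The same pattern on a hypothetical struct, not the real one:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical option layout mirroring the idea behind struct sctp_getaddresses:
 * a fixed header (the association id) followed by a variable-length address block. */
struct opt_with_addrs {
    uint32_t assoc_id;
    char     addr[1];          /* packed sockaddrs start here */
};

int
main(void)
{
    size_t optsize = 512;      /* size of the buffer handed to getsockopt() */
    size_t header  = offsetof(struct opt_with_addrs, addr);
    size_t payload = optsize - header;  /* room actually available for addresses */

    /* sizeof(uint32_t) would undercount the header if padding precedes addr[];
     * offsetof() accounts for it automatically. */
    printf("header=%zu payload=%zu\n", header, payload);
    return (0);
}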
sasoc->sasoc_peer_rwnd = stcb->asoc.peers_rwnd; @@ -3466,11 +3378,11 @@ sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize, ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (sasoc->sasoc_assoc_id == SCTP_FUTURE_ASSOC))) { SCTP_INP_RLOCK(inp); - sasoc->sasoc_cookie_life = TICKS_TO_MSEC(inp->sctp_ep.def_cookie_life); + sasoc->sasoc_cookie_life = sctp_ticks_to_msecs(inp->sctp_ep.def_cookie_life); sasoc->sasoc_asocmaxrxt = inp->sctp_ep.max_send_times; sasoc->sasoc_number_peer_destinations = 0; sasoc->sasoc_peer_rwnd = 0; - sasoc->sasoc_local_rwnd = sbspace(&inp->sctp_socket->so_rcv); + sasoc->sasoc_local_rwnd = (uint32_t)sbspace(&inp->sctp_socket->so_rcv); SCTP_INP_RUNLOCK(inp); } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); @@ -3576,7 +3488,7 @@ sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize, { struct sctp_hmacalgo *shmac; sctp_hmaclist_t *hmaclist; - uint32_t size; + size_t size; int i; SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, *optsize); @@ -3591,8 +3503,8 @@ sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize, } /* is there room for all of the hmac ids? */ size = sizeof(*shmac) + (hmaclist->num_algo * - sizeof(shmac->shmac_idents[0])); - if ((size_t)(*optsize) < size) { + sizeof(shmac->shmac_idents[0])); + if (*optsize < size) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; SCTP_INP_RUNLOCK(inp); @@ -3817,43 +3729,27 @@ sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize, break; } case SCTP_RECVRCVINFO: - { - int onoff; - if (*optsize < sizeof(int)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } else { SCTP_INP_RLOCK(inp); - onoff = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO); + *(int *)optval = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO); SCTP_INP_RUNLOCK(inp); - } - if (error == 0) { - /* return the option value */ - *(int *)optval = onoff; *optsize = sizeof(int); } break; - } case SCTP_RECVNXTINFO: - { - int onoff; - if (*optsize < sizeof(int)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; } else { SCTP_INP_RLOCK(inp); - onoff = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO); + *(int *)optval = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO); SCTP_INP_RUNLOCK(inp); - } - if (error == 0) { - /* return the option value */ - *(int *)optval = onoff; *optsize = sizeof(int); } break; - } case SCTP_DEFAULT_SNDINFO: { struct sctp_sndinfo *info; @@ -4498,7 +4394,7 @@ sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize, return (error); } -#if defined(__Panda__) || defined(__Userspace__) +#if defined(__Userspace__) int #else static int @@ -4587,7 +4483,7 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize, * The value is in ticks. Note this does not effect * old associations, only new ones. 
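The TICKS_TO_MSEC()/MSEC_TO_TICKS() macros in these option handlers are being replaced by the sctp_ticks_to_msecs()/sctp_msecs_to_ticks() helpers (plus the seconds variants used for SCTP_AUTOCLOSE). Assuming the usual hz-ticks-per-second granularity, the conversions boil down to the following, shown here as illustrative stand-ins rather than the real helpers:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: the real helpers live in sctputil and use the stack's
 * own hz value. Widening to 64 bit avoids overflow for large inputs. */
uint32_t
ticks_to_msecs(uint32_t ticks, uint32_t hz)
{
    return ((uint32_t)(((uint64_t)ticks * 1000) / hz));
}

uint32_t
msecs_to_ticks(uint32_t msecs, uint32_t hz)
{
    return ((uint32_t)(((uint64_t)msecs * hz) / 1000));
}

int
main(void)
{
    /* With hz = 1000, 200 ms corresponds to 200 ticks and round-trips exactly. */
    printf("%u\n", ticks_to_msecs(msecs_to_ticks(200, 1000), 1000));
    return (0);
}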
*/ - inp->sctp_ep.auto_close_time = SEC_TO_TICKS(*mopt); + inp->sctp_ep.auto_close_time = sctp_secs_to_ticks(*mopt); break; } SCTP_INP_WLOCK(inp); @@ -5089,10 +4985,12 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize, SCTP_CHECK_AND_CAST(sack, optval, struct sctp_sack_info, optsize); SCTP_FIND_STCB(inp, stcb, sack->sack_assoc_id); if (sack->sack_delay) { - if (sack->sack_delay > SCTP_MAX_SACK_DELAY) - sack->sack_delay = SCTP_MAX_SACK_DELAY; - if (MSEC_TO_TICKS(sack->sack_delay) < 1) { - sack->sack_delay = TICKS_TO_MSEC(1); + if (sack->sack_delay > SCTP_MAX_SACK_DELAY) { + error = EINVAL; + if (stcb != NULL) { + SCTP_TCB_UNLOCK(stcb); + } + break; } } if (stcb) { @@ -5111,7 +5009,7 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize, (sack->sack_assoc_id == SCTP_ALL_ASSOC)))) { SCTP_INP_WLOCK(inp); if (sack->sack_delay) { - inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(sack->sack_delay); + inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV] = sctp_msecs_to_ticks(sack->sack_delay); } if (sack->sack_freq) { inp->sctp_ep.sctp_sack_freq = sack->sack_freq; @@ -5165,7 +5063,7 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize, if (sca->sca_keylength == 0) { size = optsize - sizeof(struct sctp_authkey); } else { - if (sca->sca_keylength + sizeof(struct sctp_authkey) <= optsize) { + if (sca->sca_keylength + sizeof(struct sctp_authkey) <= optsize) { size = sca->sca_keylength; } else { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); @@ -5494,7 +5392,6 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize, } SCTP_INP_RUNLOCK(inp); } - } break; } @@ -5733,7 +5630,7 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize, break; } /* Do any streams have data queued? 
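The SCTP_DELAYED_SACK setter in this hunk now rejects a sack_delay above SCTP_MAX_SACK_DELAY with EINVAL instead of silently clamping it, so callers have to pass a sensible value themselves. A possible caller, using the sctp_sack_info fields visible here and the usrsctp-style socket API (assumed):

#include <string.h>
#include <usrsctp.h>

/* Request a 100 ms SACK delay and a SACK frequency of 2 for future
 * associations on `so`; the values are only examples. */
int
set_delayed_sack(struct socket *so)
{
    struct sctp_sack_info sack;

    memset(&sack, 0, sizeof(sack));
    sack.sack_assoc_id = SCTP_FUTURE_ASSOC;
    sack.sack_delay = 100;   /* milliseconds; must stay within SCTP_MAX_SACK_DELAY */
    sack.sack_freq = 2;      /* send a SACK every 2nd packet */
    return (usrsctp_setsockopt(so, IPPROTO_SCTP, SCTP_DELAYED_SACK,
                               &sack, (socklen_t)sizeof(sack)));
}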
*/ - for ( i = 0; i< stcb->asoc.streamoutcnt; i++) { + for (i = 0; i < stcb->asoc.streamoutcnt; i++) { if (!TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) { goto busy_out; } @@ -5811,15 +5708,6 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize, } case SCTP_MAX_BURST: { -#if defined(__FreeBSD__) && __FreeBSD_version < 900000 - uint8_t *burst; - - SCTP_CHECK_AND_CAST(burst, optval, uint8_t, optsize); - - SCTP_INP_WLOCK(inp); - inp->sctp_ep.max_burst = *burst; - SCTP_INP_WUNLOCK(inp); -#else struct sctp_assoc_value *av; SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); @@ -5850,7 +5738,6 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize, SCTP_INP_RUNLOCK(inp); } } -#endif break; } case SCTP_MAXSEG: @@ -6152,8 +6039,8 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize, if ((stcb != NULL) && (net == NULL)) { #ifdef INET if (addr->sa_family == AF_INET) { - struct sockaddr_in *sin; + sin = (struct sockaddr_in *)addr; if (sin->sin_addr.s_addr != INADDR_ANY) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); @@ -6211,6 +6098,7 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize, return (EINVAL); } if ((paddrp->spp_flags & SPP_PMTUD_DISABLE) && + (paddrp->spp_pathmtu > 0) && ((paddrp->spp_pathmtu < SCTP_SMALLEST_PMTU) || (paddrp->spp_pathmtu > SCTP_LARGEST_PMTU))) { if (stcb) @@ -6255,28 +6143,30 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_11); } net->dest_state |= SCTP_ADDR_NO_PMTUD; - net->mtu = paddrp->spp_pathmtu; - switch (net->ro._l_addr.sa.sa_family) { + if (paddrp->spp_pathmtu > 0) { + net->mtu = paddrp->spp_pathmtu; + switch (net->ro._l_addr.sa.sa_family) { #ifdef INET - case AF_INET: - net->mtu += SCTP_MIN_V4_OVERHEAD; - break; + case AF_INET: + net->mtu += SCTP_MIN_V4_OVERHEAD; + break; #endif #ifdef INET6 - case AF_INET6: - net->mtu += SCTP_MIN_OVERHEAD; - break; + case AF_INET6: + net->mtu += SCTP_MIN_OVERHEAD; + break; #endif #if defined(__Userspace__) - case AF_CONN: - net->mtu += sizeof(struct sctphdr); - break; + case AF_CONN: + net->mtu += sizeof(struct sctphdr); + break; #endif - default: - break; - } - if (net->mtu < stcb->asoc.smallest_mtu) { - sctp_pathmtu_adjustment(stcb, net->mtu); + default: + break; + } + if (net->mtu < stcb->asoc.smallest_mtu) { + sctp_pathmtu_adjustment(stcb, net->mtu); + } } } if (paddrp->spp_flags & SPP_PMTUD_ENABLE) { @@ -6285,7 +6175,7 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize, } net->dest_state &= ~SCTP_ADDR_NO_PMTUD; } - if (paddrp->spp_pathmaxrxt) { + if (paddrp->spp_pathmaxrxt > 0) { if (net->dest_state & SCTP_ADDR_PF) { if (net->error_count > paddrp->spp_pathmaxrxt) { net->dest_state &= ~SCTP_ADDR_PF; @@ -6328,7 +6218,7 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize, #endif } else { /************************ASSOC ONLY -- NO NET SPECIFIC SET ******************/ - if (paddrp->spp_pathmaxrxt != 0) { + if (paddrp->spp_pathmaxrxt > 0) { stcb->asoc.def_net_failure = paddrp->spp_pathmaxrxt; TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { if (net->dest_state & SCTP_ADDR_PF) { @@ -6360,7 +6250,6 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize, net->failure_threshold = paddrp->spp_pathmaxrxt; } } - if (paddrp->spp_flags & SPP_HB_ENABLE) { if (paddrp->spp_hbinterval != 0) { stcb->asoc.heart_beat_delay = paddrp->spp_hbinterval; @@ -6403,31 +6292,35 @@ 
sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_16); } net->dest_state |= SCTP_ADDR_NO_PMTUD; - net->mtu = paddrp->spp_pathmtu; - switch (net->ro._l_addr.sa.sa_family) { + if (paddrp->spp_pathmtu > 0) { + net->mtu = paddrp->spp_pathmtu; + switch (net->ro._l_addr.sa.sa_family) { #ifdef INET - case AF_INET: - net->mtu += SCTP_MIN_V4_OVERHEAD; - break; + case AF_INET: + net->mtu += SCTP_MIN_V4_OVERHEAD; + break; #endif #ifdef INET6 - case AF_INET6: - net->mtu += SCTP_MIN_OVERHEAD; - break; + case AF_INET6: + net->mtu += SCTP_MIN_OVERHEAD; + break; #endif #if defined(__Userspace__) - case AF_CONN: - net->mtu += sizeof(struct sctphdr); - break; + case AF_CONN: + net->mtu += sizeof(struct sctphdr); + break; #endif - default: - break; - } - if (net->mtu < stcb->asoc.smallest_mtu) { - sctp_pathmtu_adjustment(stcb, net->mtu); + default: + break; + } + if (net->mtu < stcb->asoc.smallest_mtu) { + sctp_pathmtu_adjustment(stcb, net->mtu); + } } } - stcb->asoc.default_mtu = paddrp->spp_pathmtu; + if (paddrp->spp_pathmtu > 0) { + stcb->asoc.default_mtu = paddrp->spp_pathmtu; + } sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_DO_NOT_PMTUD); } if (paddrp->spp_flags & SPP_PMTUD_ENABLE) { @@ -6473,7 +6366,7 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize, * For the TOS/FLOWLABEL stuff you set it * with the options on the socket */ - if (paddrp->spp_pathmaxrxt != 0) { + if (paddrp->spp_pathmaxrxt > 0) { inp->sctp_ep.def_net_failure = paddrp->spp_pathmaxrxt; } @@ -6482,14 +6375,14 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize, else if (paddrp->spp_hbinterval != 0) { if (paddrp->spp_hbinterval > SCTP_MAX_HB_INTERVAL) paddrp->spp_hbinterval= SCTP_MAX_HB_INTERVAL; - inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = MSEC_TO_TICKS(paddrp->spp_hbinterval); + inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = sctp_msecs_to_ticks(paddrp->spp_hbinterval); } if (paddrp->spp_flags & SPP_HB_ENABLE) { if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO) { inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = 0; } else if (paddrp->spp_hbinterval) { - inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = MSEC_TO_TICKS(paddrp->spp_hbinterval); + inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = sctp_msecs_to_ticks(paddrp->spp_hbinterval); } sctp_feature_off(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT); } else if (paddrp->spp_flags & SPP_HB_DISABLE) { @@ -6499,7 +6392,9 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize, inp->sctp_ep.default_mtu = 0; sctp_feature_off(inp, SCTP_PCB_FLAGS_DO_NOT_PMTUD); } else if (paddrp->spp_flags & SPP_PMTUD_DISABLE) { - inp->sctp_ep.default_mtu = paddrp->spp_pathmtu; + if (paddrp->spp_pathmtu > 0) { + inp->sctp_ep.default_mtu = paddrp->spp_pathmtu; + } sctp_feature_on(inp, SCTP_PCB_FLAGS_DO_NOT_PMTUD); } if (paddrp->spp_flags & SPP_DSCP) { @@ -6592,19 +6487,21 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize, SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, optsize); SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id); - if (sasoc->sasoc_cookie_life) { + if (sasoc->sasoc_cookie_life > 0) { /* boundary check the cookie life */ - if (sasoc->sasoc_cookie_life < 1000) - sasoc->sasoc_cookie_life = 1000; + if (sasoc->sasoc_cookie_life < SCTP_MIN_COOKIE_LIFE) { + sasoc->sasoc_cookie_life = SCTP_MIN_COOKIE_LIFE; + } if (sasoc->sasoc_cookie_life > SCTP_MAX_COOKIE_LIFE) { sasoc->sasoc_cookie_life = SCTP_MAX_COOKIE_LIFE; } } if 
(stcb) { - if (sasoc->sasoc_asocmaxrxt) + if (sasoc->sasoc_asocmaxrxt > 0) { stcb->asoc.max_send_times = sasoc->sasoc_asocmaxrxt; - if (sasoc->sasoc_cookie_life) { - stcb->asoc.cookie_life = MSEC_TO_TICKS(sasoc->sasoc_cookie_life); + } + if (sasoc->sasoc_cookie_life > 0) { + stcb->asoc.cookie_life = sctp_msecs_to_ticks(sasoc->sasoc_cookie_life); } SCTP_TCB_UNLOCK(stcb); } else { @@ -6613,10 +6510,11 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize, ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && (sasoc->sasoc_assoc_id == SCTP_FUTURE_ASSOC))) { SCTP_INP_WLOCK(inp); - if (sasoc->sasoc_asocmaxrxt) + if (sasoc->sasoc_asocmaxrxt > 0) { inp->sctp_ep.max_send_times = sasoc->sasoc_asocmaxrxt; - if (sasoc->sasoc_cookie_life) { - inp->sctp_ep.def_cookie_life = MSEC_TO_TICKS(sasoc->sasoc_cookie_life); + } + if (sasoc->sasoc_cookie_life > 0) { + inp->sctp_ep.def_cookie_life = sctp_msecs_to_ticks(sasoc->sasoc_cookie_life); } SCTP_INP_WUNLOCK(inp); } else { @@ -6726,19 +6624,13 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize, #ifdef SCTP_MVRF int i, fnd = 0; #endif -#if !defined(__Windows__) && !defined(__Userspace__) +#if !defined(_WIN32) && !defined(__Userspace__) #if defined(__APPLE__) struct proc *proc; #endif -#ifdef __FreeBSD__ -#if __FreeBSD_version > 602000 +#if defined(__FreeBSD__) error = priv_check(curthread, PRIV_NETINET_RESERVEDPORT); -#elif __FreeBSD_version >= 500000 - error = suser((struct thread *)p); -#else - error = suser(p); -#endif #elif defined(__APPLE__) proc = (struct proc *)p; if (p) { @@ -6832,7 +6724,7 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize, error = EINVAL; goto out_of_it; } -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) } else { switch (addr->sa_family) { #ifdef INET @@ -6887,23 +6779,23 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize, } case SCTP_BINDX_ADD_ADDR: { - struct sctp_getaddresses *addrs; -#if defined(__FreeBSD__) && __FreeBSD_version >= 500000 + struct sockaddr *sa; +#if defined(__FreeBSD__) && !defined(__Userspace__) struct thread *td; td = (struct thread *)p; #endif - SCTP_CHECK_AND_CAST(addrs, optval, struct sctp_getaddresses, - optsize); + SCTP_CHECK_AND_CAST(sa, optval, struct sockaddr, optsize); #ifdef INET - if (addrs->addr->sa_family == AF_INET) { - if (optsize < sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in)) { + if (sa->sa_family == AF_INET) { + if (optsize < sizeof(struct sockaddr_in)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; break; } -#if defined(__FreeBSD__) && __FreeBSD_version >= 800000 - if (td != NULL && (error = prison_local_ip4(td->td_ucred, &(((struct sockaddr_in *)(addrs->addr))->sin_addr)))) { +#if defined(__FreeBSD__) && !defined(__Userspace__) + if (td != NULL && + (error = prison_local_ip4(td->td_ucred, &(((struct sockaddr_in *)sa)->sin_addr)))) { SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error); break; } @@ -6911,65 +6803,16 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize, } else #endif #ifdef INET6 - if (addrs->addr->sa_family == AF_INET6) { - if (optsize < sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in6)) { + if (sa->sa_family == AF_INET6) { + if (optsize < sizeof(struct sockaddr_in6)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); error = EINVAL; break; } -#if defined(__FreeBSD__) && 
__FreeBSD_version >= 800000 - if (td != NULL && (error = prison_local_ip6(td->td_ucred, &(((struct sockaddr_in6 *)(addrs->addr))->sin6_addr), - (SCTP_IPV6_V6ONLY(inp) != 0))) != 0) { - SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error); - break; - } -#endif - } else -#endif - { - error = EAFNOSUPPORT; - break; - } - sctp_bindx_add_address(so, inp, addrs->addr, - addrs->sget_assoc_id, vrf_id, - &error, p); - break; - } - case SCTP_BINDX_REM_ADDR: - { - struct sctp_getaddresses *addrs; -#if defined(__FreeBSD__) && __FreeBSD_version >= 500000 - struct thread *td; - td = (struct thread *)p; - -#endif - SCTP_CHECK_AND_CAST(addrs, optval, struct sctp_getaddresses, optsize); -#ifdef INET - if (addrs->addr->sa_family == AF_INET) { - if (optsize < sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in)) { - SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); - error = EINVAL; - break; - } -#if defined(__FreeBSD__) && __FreeBSD_version >= 800000 - if (td != NULL && (error = prison_local_ip4(td->td_ucred, &(((struct sockaddr_in *)(addrs->addr))->sin_addr)))) { - SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error); - break; - } -#endif - } else -#endif -#ifdef INET6 - if (addrs->addr->sa_family == AF_INET6) { - if (optsize < sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in6)) { - SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); - error = EINVAL; - break; - } -#if defined(__FreeBSD__) && __FreeBSD_version >= 800000 +#if defined(__FreeBSD__) && !defined(__Userspace__) if (td != NULL && (error = prison_local_ip6(td->td_ucred, - &(((struct sockaddr_in6 *)(addrs->addr))->sin6_addr), + &(((struct sockaddr_in6 *)sa)->sin6_addr), (SCTP_IPV6_V6ONLY(inp) != 0))) != 0) { SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error); break; @@ -6981,12 +6824,60 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize, error = EAFNOSUPPORT; break; } - sctp_bindx_delete_address(inp, addrs->addr, - addrs->sget_assoc_id, vrf_id, - &error); + sctp_bindx_add_address(so, inp, sa, vrf_id, &error, p); break; } -#ifdef __APPLE__ + case SCTP_BINDX_REM_ADDR: + { + struct sockaddr *sa; +#if defined(__FreeBSD__) && !defined(__Userspace__) + struct thread *td; + td = (struct thread *)p; + +#endif + SCTP_CHECK_AND_CAST(sa, optval, struct sockaddr, optsize); +#ifdef INET + if (sa->sa_family == AF_INET) { + if (optsize < sizeof(struct sockaddr_in)) { + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); + error = EINVAL; + break; + } +#if defined(__FreeBSD__) && !defined(__Userspace__) + if (td != NULL && + (error = prison_local_ip4(td->td_ucred, &(((struct sockaddr_in *)sa)->sin_addr)))) { + SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error); + break; + } +#endif + } else +#endif +#ifdef INET6 + if (sa->sa_family == AF_INET6) { + if (optsize < sizeof(struct sockaddr_in6)) { + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); + error = EINVAL; + break; + } +#if defined(__FreeBSD__) && !defined(__Userspace__) + if (td != NULL && + (error = prison_local_ip6(td->td_ucred, + &(((struct sockaddr_in6 *)sa)->sin6_addr), + (SCTP_IPV6_V6ONLY(inp) != 0))) != 0) { + SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error); + break; + } +#endif + } else +#endif + { + error = EAFNOSUPPORT; + break; + } + sctp_bindx_delete_address(inp, sa, vrf_id, &error); + break; + } +#if defined(__APPLE__) && !defined(__Userspace__) case 
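/*
 * Illustrative sketch (not part of the upstream change): the reworked
 * SCTP_BINDX_ADD_ADDR / SCTP_BINDX_REM_ADDR cases above now take a bare
 * struct sockaddr, validated against sizeof(struct sockaddr_in) or
 * sizeof(struct sockaddr_in6), instead of a struct sctp_getaddresses.
 * Assuming the usrsctp userland API, applications normally reach these
 * cases through usrsctp_bindx(); the helper below is made up for the example.
 */
#include <netinet/in.h>
#include <usrsctp.h>

static int
rebind_local_address(struct socket *so, struct sockaddr_in *old_sin,
                     struct sockaddr_in *new_sin)
{
	int ret;

	ret = usrsctp_bindx(so, (struct sockaddr *)new_sin, 1,
	                    SCTP_BINDX_ADD_ADDR);
	if (ret != 0) {
		return (ret);
	}
	return (usrsctp_bindx(so, (struct sockaddr *)old_sin, 1,
	                      SCTP_BINDX_REM_ADDR));
}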
SCTP_LISTEN_FIX: /* only applies to one-to-many sockets */ if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) { @@ -6998,7 +6889,7 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize, error = EINVAL; } break; -#endif /* __APPLE__ */ +#endif case SCTP_EVENT: { struct sctp_event *event; @@ -7302,8 +7193,8 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize, if ((stcb != NULL) && (net == NULL)) { #ifdef INET if (addr->sa_family == AF_INET) { - struct sockaddr_in *sin; + sin = (struct sockaddr_in *)addr; if (sin->sin_addr.s_addr != INADDR_ANY) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); @@ -7483,8 +7374,8 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize, if ((stcb != NULL) && (net == NULL)) { #ifdef INET if (addr->sa_family == AF_INET) { - struct sockaddr_in *sin; + sin = (struct sockaddr_in *)addr; if (sin->sin_addr.s_addr != INADDR_ANY) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); @@ -7827,17 +7718,18 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize, return (error); } -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) +#if !defined(__Userspace__) int sctp_ctloutput(struct socket *so, struct sockopt *sopt) { - void *optval = NULL; - size_t optsize = 0; - void *p; - int error = 0; #if defined(__FreeBSD__) + struct epoch_tracker et; struct sctp_inpcb *inp; #endif + void *optval = NULL; + void *p; + size_t optsize = 0; + int error = 0; #if defined(__FreeBSD__) if ((sopt->sopt_level == SOL_SOCKET) && @@ -7884,13 +7776,19 @@ sctp_ctloutput(struct socket *so, struct sockopt *sopt) goto out; } } -#if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__Windows__) +#if defined(__FreeBSD__) || defined(_WIN32) p = (void *)sopt->sopt_td; #else p = (void *)sopt->sopt_p; #endif if (sopt->sopt_dir == SOPT_SET) { +#if defined(__FreeBSD__) + NET_EPOCH_ENTER(et); +#endif error = sctp_setopt(so, sopt->sopt_name, optval, optsize, p); +#if defined(__FreeBSD__) + NET_EPOCH_EXIT(et); +#endif } else if (sopt->sopt_dir == SOPT_GET) { error = sctp_getopt(so, sopt->sopt_name, optval, &optsize, p); } else { @@ -7909,21 +7807,20 @@ out: #endif #ifdef INET -#if defined(__FreeBSD__) && __FreeBSD_version >= 500000 -static int -sctp_connect(struct socket *so, struct sockaddr *addr, struct thread *p) -{ -#else -#if defined(__FreeBSD__) || defined(__APPLE__) -static int -sctp_connect(struct socket *so, struct sockaddr *addr, struct proc *p) -{ -#elif defined(__Panda__) || defined(__Userspace__) +#if defined(__Userspace__) int sctp_connect(struct socket *so, struct sockaddr *addr) { void *p = NULL; -#elif defined(__Windows__) +#elif defined(__FreeBSD__) +static int +sctp_connect(struct socket *so, struct sockaddr *addr, struct thread *p) +{ +#elif defined(__APPLE__) +static int +sctp_connect(struct socket *so, struct sockaddr *addr, struct proc *p) +{ +#elif defined(_WIN32) static int sctp_connect(struct socket *so, struct sockaddr *addr, PKTHREAD p) { @@ -7934,6 +7831,8 @@ sctp_connect(struct socket *so, struct mbuf *nam, struct proc *p) struct sockaddr *addr = mtod(nam, struct sockaddr *); #endif +#if defined(__FreeBSD__) && !defined(__Userspace__) + struct epoch_tracker et; #endif #ifdef SCTP_MVRF int i, fnd = 0; @@ -7956,14 +7855,14 @@ sctp_connect(struct socket *so, struct mbuf *nam, struct proc *p) } #if defined(__Userspace__) - /* TODO __Userspace__ falls into this code for IPv6 stuff at the moment... 
*/ + /* TODO __Userspace__ falls into this code for IPv6 stuff at the moment... */ #endif -#if !defined(__Windows__) && !defined(__Userspace_os_Linux) && !defined(__Userspace_os_Windows) +#if !defined(_WIN32) && !defined(__linux__) && !defined(__EMSCRIPTEN__) switch (addr->sa_family) { #ifdef INET6 case AF_INET6: { -#if defined(__FreeBSD__) && __FreeBSD_version >= 800000 +#if defined(__FreeBSD__) && !defined(__Userspace__) struct sockaddr_in6 *sin6; #endif @@ -7971,7 +7870,7 @@ sctp_connect(struct socket *so, struct mbuf *nam, struct proc *p) SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return (EINVAL); } -#if defined(__FreeBSD__) && __FreeBSD_version >= 800000 +#if defined(__FreeBSD__) && !defined(__Userspace__) sin6 = (struct sockaddr_in6 *)addr; if (p != NULL && (error = prison_remote_ip6(p->td_ucred, &sin6->sin6_addr)) != 0) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); @@ -7984,17 +7883,17 @@ sctp_connect(struct socket *so, struct mbuf *nam, struct proc *p) #ifdef INET case AF_INET: { -#if defined(__FreeBSD__) && __FreeBSD_version >= 800000 +#if defined(__FreeBSD__) && !defined(__Userspace__) struct sockaddr_in *sin; #endif -#if !defined(__Userspace_os_Windows) +#if !defined(_WIN32) if (addr->sa_len != sizeof(struct sockaddr_in)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); return (EINVAL); } #endif -#if defined(__FreeBSD__) && __FreeBSD_version >= 800000 +#if defined(__FreeBSD__) && !defined(__Userspace__) sin = (struct sockaddr_in *)addr; if (p != NULL && (error = prison_remote_ip4(p->td_ucred, &sin->sin_addr)) != 0) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); @@ -8012,7 +7911,9 @@ sctp_connect(struct socket *so, struct mbuf *nam, struct proc *p) SCTP_INP_INCR_REF(inp); SCTP_ASOC_CREATE_LOCK(inp); create_lock_on = 1; - +#if defined(__FreeBSD__) && !defined(__Userspace__) + NET_EPOCH_ENTER(et); +#endif if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { @@ -8098,29 +7999,26 @@ sctp_connect(struct socket *so, struct mbuf *nam, struct proc *p) } #endif /* We are GOOD to go */ - stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id, - inp->sctp_ep.pre_open_stream_count, - inp->sctp_ep.port, p, - SCTP_INITIALIZE_AUTH_PARAMS); + stcb = sctp_aloc_assoc_connected(inp, addr, &error, 0, 0, vrf_id, + inp->sctp_ep.pre_open_stream_count, + inp->sctp_ep.port, p, + SCTP_INITIALIZE_AUTH_PARAMS); if (stcb == NULL) { /* Gak! no memory */ goto out_now; } - if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { - stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED; - /* Set the connected flag so we can queue data */ - soisconnecting(so); - } SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT); (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED); SCTP_TCB_UNLOCK(stcb); out_now: +#if defined(__FreeBSD__) && !defined(__Userspace__) + NET_EPOCH_EXIT(et); +#endif if (create_lock_on) { SCTP_ASOC_CREATE_UNLOCK(inp); } - SCTP_INP_DECR_REF(inp); return (error); } @@ -8263,19 +8161,14 @@ sctpconn_connect(struct socket *so, struct sockaddr *addr) } #endif /* We are GOOD to go */ - stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id, - inp->sctp_ep.pre_open_stream_count, - inp->sctp_ep.port, p, - SCTP_INITIALIZE_AUTH_PARAMS); + stcb = sctp_aloc_assoc_connected(inp, addr, &error, 0, 0, vrf_id, + inp->sctp_ep.pre_open_stream_count, + inp->sctp_ep.port, p, + SCTP_INITIALIZE_AUTH_PARAMS); if (stcb == NULL) { /* Gak! 
no memory */ goto out_now; } - if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { - stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED; - /* Set the connected flag so we can queue data */ - soisconnecting(so); - } SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT); (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); @@ -8291,16 +8184,12 @@ sctpconn_connect(struct socket *so, struct sockaddr *addr) } #endif int -#if defined(__FreeBSD__) && __FreeBSD_version >= 500000 -#if __FreeBSD_version >= 700000 -sctp_listen(struct socket *so, int backlog, struct thread *p) -#else -sctp_listen(struct socket *so, struct thread *p) -#endif -#elif defined(__Windows__) -sctp_listen(struct socket *so, int backlog, PKTHREAD p) -#elif defined(__Userspace__) +#if defined(__Userspace__) sctp_listen(struct socket *so, int backlog, struct proc *p) +#elif defined(__FreeBSD__) +sctp_listen(struct socket *so, int backlog, struct thread *p) +#elif defined(_WIN32) +sctp_listen(struct socket *so, int backlog, PKTHREAD p) #else sctp_listen(struct socket *so, struct proc *p) #endif @@ -8429,20 +8318,12 @@ sctp_listen(struct socket *so, struct proc *p) } } } - SCTP_INP_RLOCK(inp); + SCTP_INP_INFO_WLOCK(); + SCTP_INP_WLOCK(inp); #ifdef SCTP_LOCK_LOGGING if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) { sctp_log_lock(inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_SOCK); } -#endif -#if (defined(__FreeBSD__) && __FreeBSD_version > 500000) || defined(__Userspace__) - SOCK_LOCK(so); - error = solisten_proto_check(so); - SOCK_UNLOCK(so); - if (error) { - SCTP_INP_RUNLOCK(inp); - return (error); - } #endif if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) && (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { @@ -8453,47 +8334,57 @@ sctp_listen(struct socket *so, struct proc *p) * - We must then move the guy that was listener to the TCP Pool. */ if (sctp_swap_inpcb_for_listen(inp)) { - SCTP_INP_RUNLOCK(inp); - SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE); - return (EADDRINUSE); + error = EADDRINUSE; + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); + goto out; } } - +#if defined(__FreeBSD__) || defined(__Userspace__) + SOCK_LOCK(so); + error = solisten_proto_check(so); + if (error) { + SOCK_UNLOCK(so); + goto out; + } +#endif if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) { - /* We are already connected AND the TCP model */ - SCTP_INP_RUNLOCK(inp); - SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE); - return (EADDRINUSE); + SOCK_UNLOCK(so); +#if defined(__FreeBSD__) && !defined(__Userspace__) + solisten_proto_abort(so); +#endif + error = EADDRINUSE; + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error); + goto out; } - SCTP_INP_RUNLOCK(inp); if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { - /* We must do a bind. 
*/ - if ((error = sctp_inpcb_bind(so, NULL, NULL, p))) { + if ((error = sctp_inpcb_bind_locked(inp, NULL, NULL, p))) { + SOCK_UNLOCK(so); +#if defined(__FreeBSD__) && !defined(__Userspace__) + solisten_proto_abort(so); +#endif /* bind error, probably perm */ - return (error); + goto out; } } - SCTP_INP_WLOCK(inp); -#if (defined(__FreeBSD__) && __FreeBSD_version > 500000) || defined(__Windows__) || defined(__Userspace__) -#if __FreeBSD_version >= 1200034 +#if defined(__FreeBSD__) && !defined(__Userspace__) if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) == 0) { - SOCK_LOCK(so); solisten_proto(so, backlog); SOCK_UNLOCK(so); + inp->sctp_flags |= SCTP_PCB_FLAGS_ACCEPTING; + } else { + solisten_proto_abort(so); + SOCK_UNLOCK(so); + if (backlog > 0) { + inp->sctp_flags |= SCTP_PCB_FLAGS_ACCEPTING; + } else { + inp->sctp_flags &= ~SCTP_PCB_FLAGS_ACCEPTING; + } } -#elif __FreeBSD_version >= 700000 || defined(__Windows__) || defined(__Userspace__) - /* It appears for 7.0 and on, we must always call this. */ - SOCK_LOCK(so); +#elif defined(_WIN32) || defined(__Userspace__) solisten_proto(so, backlog); -#else - SOCK_LOCK(so); - if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) == 0) { - solisten_proto(so); - } #endif -#endif -#if !defined(__FreeBSD__) || __FreeBSD_version < 1200034 +#if !(defined(__FreeBSD__) && !defined(__Userspace__)) if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) { /* remove the ACCEPTCONN flag for one-to-many sockets */ #if defined(__Userspace__) @@ -8503,17 +8394,15 @@ sctp_listen(struct socket *so, struct proc *p) #endif } SOCK_UNLOCK(so); -#endif -#if (defined(__FreeBSD__) && __FreeBSD_version >= 700000) || defined(__Windows__) || defined(__Userspace__) if (backlog > 0) { inp->sctp_flags |= SCTP_PCB_FLAGS_ACCEPTING; } else { inp->sctp_flags &= ~SCTP_PCB_FLAGS_ACCEPTING; } -#else - inp->sctp_flags |= SCTP_PCB_FLAGS_ACCEPTING; #endif +out: SCTP_INP_WUNLOCK(inp); + SCTP_INP_INFO_WUNLOCK(); return (error); } @@ -8526,9 +8415,9 @@ sctp_accept(struct socket *so, struct sockaddr **addr) struct sctp_inpcb *inp; union sctp_sockstore store; #ifdef INET6 -#ifdef SCTP_KAME +#if defined(SCTP_KAME) && defined(SCTP_EMBEDDED_V6_SCOPE) int error; -#endif /* SCTP_KAME */ +#endif #endif inp = (struct sctp_inpcb *)so->so_pcb; @@ -8566,7 +8455,7 @@ sctp_accept(struct socket *so, struct sockaddr **addr) #if defined(__Userspace__) /*__Userspace__ calling sowwakup_locked because of SOCKBUF_LOCK above. 
*/ #endif -#if defined(__FreeBSD__) || defined(__Windows__) || defined(__Userspace__) +#if defined(__FreeBSD__) || defined(_WIN32) || defined(__Userspace__) sowwakeup_locked(inp->sctp_socket); #else #if defined(__APPLE__) @@ -8586,7 +8475,7 @@ sctp_accept(struct socket *so, struct sockaddr **addr) #if defined(__Userspace__) /*__Userspace__ calling sorwakup_locked because of SOCKBUF_LOCK above */ #endif -#if defined(__FreeBSD__) || defined(__Windows__) || defined(__Userspace__) +#if defined(__FreeBSD__) || defined(_WIN32) || defined(__Userspace__) sorwakeup_locked(inp->sctp_socket); #else #if defined(__APPLE__) @@ -8612,25 +8501,16 @@ sctp_accept(struct socket *so, struct sockaddr **addr) { struct sockaddr_in *sin; -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) || defined(__Userspace__) SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin); if (sin == NULL) return (ENOMEM); -#else - sin = (struct sockaddr_in *)addr; - memset(sin, 0, sizeof(*sin)); -#endif sin->sin_family = AF_INET; #ifdef HAVE_SIN_LEN sin->sin_len = sizeof(*sin); #endif sin->sin_port = store.sin.sin_port; sin->sin_addr = store.sin.sin_addr; -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) || defined(__Userspace__) *addr = (struct sockaddr *)sin; -#elif !defined(__Panda__) - SCTP_BUF_LEN(nam) = sizeof(*sin); -#endif break; } #endif @@ -8639,14 +8519,9 @@ sctp_accept(struct socket *so, struct sockaddr **addr) { struct sockaddr_in6 *sin6; -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) || defined(__Userspace__) SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof *sin6); if (sin6 == NULL) return (ENOMEM); -#else - sin6 = (struct sockaddr_in6 *)addr; - memset(sin6, 0, sizeof(*sin6)); -#endif sin6->sin6_family = AF_INET6; #ifdef HAVE_SIN6_LEN sin6->sin6_len = sizeof(*sin6); @@ -8670,11 +8545,7 @@ sctp_accept(struct socket *so, struct sockaddr **addr) sin6->sin6_scope_id = 0; /* XXX */ #endif /* SCTP_KAME */ #endif /* SCTP_EMBEDDED_V6_SCOPE */ -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) || defined(__Userspace__) *addr = (struct sockaddr *)sin6; -#elif !defined(__Panda__) - SCTP_BUF_LEN(nam) = sizeof(*sin6); -#endif break; } #endif @@ -8706,14 +8577,10 @@ sctp_accept(struct socket *so, struct sockaddr **addr) #ifdef INET int -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) +#if !defined(__Userspace__) sctp_ingetaddr(struct socket *so, struct sockaddr **addr) { struct sockaddr_in *sin; -#elif defined(__Panda__) -sctp_ingetaddr(struct socket *so, struct sockaddr *addr) -{ - struct sockaddr_in *sin = (struct sockaddr_in *)addr; #else sctp_ingetaddr(struct socket *so, struct mbuf *nam) { @@ -8726,12 +8593,10 @@ sctp_ingetaddr(struct socket *so, struct mbuf *nam) /* * Do the malloc first in case it blocks. 
*/ -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) +#if !defined(__Userspace__) SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin); if (sin == NULL) return (ENOMEM); -#elif defined(__Panda__) - memset(sin, 0, sizeof(*sin)); #else SCTP_BUF_LEN(nam) = sizeof(*sin); memset(sin, 0, sizeof(*sin)); @@ -8742,7 +8607,7 @@ sctp_ingetaddr(struct socket *so, struct mbuf *nam) #endif inp = (struct sctp_inpcb *)so->so_pcb; if (!inp) { -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) +#if !defined(__Userspace__) SCTP_FREE_SONAME(sin); #endif SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); @@ -8813,7 +8678,7 @@ sctp_ingetaddr(struct socket *so, struct mbuf *nam) } } if (!fnd) { -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) +#if !defined(__Userspace__) SCTP_FREE_SONAME(sin); #endif SCTP_INP_RUNLOCK(inp); @@ -8822,21 +8687,17 @@ sctp_ingetaddr(struct socket *so, struct mbuf *nam) } } SCTP_INP_RUNLOCK(inp); -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) +#if !defined(__Userspace__) (*addr) = (struct sockaddr *)sin; #endif return (0); } int -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) +#if !defined(__Userspace__) sctp_peeraddr(struct socket *so, struct sockaddr **addr) { struct sockaddr_in *sin; -#elif defined(__Panda__) -sctp_peeraddr(struct socket *so, struct sockaddr *addr) -{ - struct sockaddr_in *sin = (struct sockaddr_in *)addr; #else sctp_peeraddr(struct socket *so, struct mbuf *nam) { @@ -8850,12 +8711,10 @@ sctp_peeraddr(struct socket *so, struct mbuf *nam) struct sctp_nets *net; /* Do the malloc first in case it blocks. */ -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) +#if !defined(__Userspace__) SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin); if (sin == NULL) return (ENOMEM); -#elif defined(__Panda__) - memset(sin, 0, sizeof(*sin)); #else SCTP_BUF_LEN(nam) = sizeof(*sin); memset(sin, 0, sizeof(*sin)); @@ -8869,7 +8728,7 @@ sctp_peeraddr(struct socket *so, struct mbuf *nam) if ((inp == NULL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) { /* UDP type and listeners will drop out here */ -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) +#if !defined(__Userspace__) SCTP_FREE_SONAME(sin); #endif SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN); @@ -8882,7 +8741,7 @@ sctp_peeraddr(struct socket *so, struct mbuf *nam) } SCTP_INP_RUNLOCK(inp); if (stcb == NULL) { -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) +#if !defined(__Userspace__) SCTP_FREE_SONAME(sin); #endif SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); @@ -8901,19 +8760,19 @@ sctp_peeraddr(struct socket *so, struct mbuf *nam) SCTP_TCB_UNLOCK(stcb); if (!fnd) { /* No IPv4 address */ -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) +#if !defined(__Userspace__) SCTP_FREE_SONAME(sin); #endif SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); return (ENOENT); } -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) +#if !defined(__Userspace__) (*addr) = (struct sockaddr *)sin; #endif return (0); } -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) +#if !defined(__Userspace__) struct pr_usrreqs sctp_usrreqs = { #if defined(__FreeBSD__) .pru_abort = sctp_abort, @@ -8922,15 +8781,10 @@ struct pr_usrreqs sctp_usrreqs = { .pru_bind = sctp_bind, .pru_connect = sctp_connect, .pru_control = 
in_control, -#if __FreeBSD_version >= 690000 .pru_close = sctp_close, .pru_detach = sctp_close, .pru_sopoll = sopoll_generic, .pru_flush = sctp_flush, -#else - .pru_detach = sctp_detach, - .pru_sopoll = sopoll, -#endif .pru_disconnect = sctp_disconnect, .pru_listen = sctp_listen, .pru_peeraddr = sctp_peeraddr, @@ -8960,7 +8814,7 @@ struct pr_usrreqs sctp_usrreqs = { .pru_sosend = sctp_sosend, .pru_soreceive = sctp_soreceive, .pru_sopoll = sopoll -#elif defined(__Windows__) +#elif defined(_WIN32) && !defined(__Userspace__) sctp_abort, sctp_accept, sctp_attach, @@ -8986,7 +8840,7 @@ struct pr_usrreqs sctp_usrreqs = { sctp_close #endif }; -#elif !defined(__Panda__) && !defined(__Userspace__) +#elif !defined(__Userspace__) int sctp_usrreq(so, req, m, nam, control) struct socket *so; @@ -8994,8 +8848,6 @@ sctp_usrreq(so, req, m, nam, control) struct mbuf *m, *nam, *control; { struct proc *p = curproc; - uint32_t vrf_id; - struct sctp_vrf *vrf; int error; int family; struct sctp_inpcb *inp = (struct sctp_inpcb *)so->so_pcb; @@ -9131,7 +8983,7 @@ register_recv_cb(struct socket *so, } int -register_send_cb(struct socket *so, uint32_t sb_threshold, int (*send_cb)(struct socket *sock, uint32_t sb_free)) +register_send_cb(struct socket *so, uint32_t sb_threshold, int (*send_cb)(struct socket *sock, uint32_t sb_free, void *ulp_info)) { struct sctp_inpcb *inp; @@ -9165,4 +9017,23 @@ register_ulp_info (struct socket *so, void *ulp_info) SCTP_INP_WUNLOCK(inp); return (1); } + +int +retrieve_ulp_info (struct socket *so, void **pulp_info) +{ + struct sctp_inpcb *inp; + + if (pulp_info == NULL) { + return (0); + } + + inp = (struct sctp_inpcb *) so->so_pcb; + if (inp == NULL) { + return (0); + } + SCTP_INP_RLOCK(inp); + *pulp_info = inp->ulp_info; + SCTP_INP_RUNLOCK(inp); + return (1); +} #endif diff --git a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_var.h b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_var.h index ead9e9cd6..3cdfdfe76 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_var.h +++ b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctp_var.h @@ -32,9 +32,9 @@ * THE POSSIBILITY OF SUCH DAMAGE. 
*/ -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) #include -__FBSDID("$FreeBSD: head/sys/netinet/sctp_var.h 317457 2017-04-26 19:26:40Z tuexen $"); +__FBSDID("$FreeBSD: head/sys/netinet/sctp_var.h 365071 2020-09-01 21:19:14Z mjg $"); #endif #ifndef _NETINET_SCTP_VAR_H_ @@ -44,11 +44,10 @@ __FBSDID("$FreeBSD: head/sys/netinet/sctp_var.h 317457 2017-04-26 19:26:40Z tuex #if defined(_KERNEL) || defined(__Userspace__) -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) +#if !defined(__Userspace__) extern struct pr_usrreqs sctp_usrreqs; #endif - #define sctp_feature_on(inp, feature) (inp->sctp_features |= feature) #define sctp_feature_off(inp, feature) (inp->sctp_features &= ~feature) #define sctp_is_feature_on(inp, feature) ((inp->sctp_features & feature) == feature) @@ -186,18 +185,11 @@ extern struct pr_usrreqs sctp_usrreqs; } \ } -#if defined(__FreeBSD__) && __FreeBSD_version > 500000 - +#if defined(__FreeBSD__) && !defined(__Userspace__) #define sctp_free_remote_addr(__net) { \ if ((__net)) { \ if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&(__net)->ref_count)) { \ - (void)SCTP_OS_TIMER_STOP(&(__net)->rxt_timer.timer); \ - (void)SCTP_OS_TIMER_STOP(&(__net)->pmtu_timer.timer); \ - (void)SCTP_OS_TIMER_STOP(&(__net)->hb_timer.timer); \ - if ((__net)->ro.ro_rt) { \ - RTFREE((__net)->ro.ro_rt); \ - (__net)->ro.ro_rt = NULL; \ - } \ + RO_NHFREE(&(__net)->ro); \ if ((__net)->src_addr_selected) { \ sctp_free_ifa((__net)->ro._s_addr); \ (__net)->ro._s_addr = NULL; \ @@ -233,15 +225,10 @@ extern struct pr_usrreqs sctp_usrreqs; SCTP_BUF_TYPE(m) != MT_OOBDATA) \ atomic_add_int(&(sb)->sb_ctl,SCTP_BUF_LEN((m))); \ } - #else /* FreeBSD Version <= 500000 or non-FreeBSD */ - #define sctp_free_remote_addr(__net) { \ if ((__net)) { \ if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&(__net)->ref_count)) { \ - (void)SCTP_OS_TIMER_STOP(&(__net)->rxt_timer.timer); \ - (void)SCTP_OS_TIMER_STOP(&(__net)->pmtu_timer.timer); \ - (void)SCTP_OS_TIMER_STOP(&(__net)->hb_timer.timer); \ if ((__net)->ro.ro_rt) { \ RTFREE((__net)->ro.ro_rt); \ (__net)->ro.ro_rt = NULL; \ @@ -258,31 +245,6 @@ extern struct pr_usrreqs sctp_usrreqs; } \ } -#if defined(__Panda__) -#define sctp_sbfree(ctl, stcb, sb, m) { \ - if ((sb)->sb_cc >= (uint32_t)SCTP_BUF_LEN((m))) { \ - atomic_subtract_int(&(sb)->sb_cc, SCTP_BUF_LEN((m))); \ - } else { \ - (sb)->sb_cc = 0; \ - } \ - if (((ctl)->do_not_ref_stcb == 0) && stcb) { \ - if ((stcb)->asoc.sb_cc >= (uint32_t)SCTP_BUF_LEN((m))) { \ - atomic_subtract_int(&(stcb)->asoc.sb_cc, SCTP_BUF_LEN((m))); \ - } else { \ - (stcb)->asoc.sb_cc = 0; \ - } \ - } \ -} - -#define sctp_sballoc(stcb, sb, m) { \ - atomic_add_int(&(sb)->sb_cc, SCTP_BUF_LEN((m))); \ - if (stcb) { \ - atomic_add_int(&(stcb)->asoc.sb_cc, SCTP_BUF_LEN((m))); \ - } \ -} - -#else - #define sctp_sbfree(ctl, stcb, sb, m) { \ SCTP_SAVE_ATOMIC_DECREMENT(&(sb)->sb_cc, SCTP_BUF_LEN((m))); \ SCTP_SAVE_ATOMIC_DECREMENT(&(sb)->sb_mbcnt, MSIZE); \ @@ -301,7 +263,6 @@ extern struct pr_usrreqs sctp_usrreqs; } \ } #endif -#endif #define sctp_ucount_incr(val) { \ val++; \ @@ -402,23 +363,13 @@ struct sctp_inpcb; struct sctp_tcb; struct sctphdr; - -#if (defined(__FreeBSD__) && __FreeBSD_version > 690000) || defined(__Windows__) || defined(__Userspace__) +#if defined(__FreeBSD__) || defined(_WIN32) || defined(__Userspace__) void sctp_close(struct socket *so); #else int sctp_detach(struct socket *so); #endif int sctp_disconnect(struct socket *so); -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) -#if 
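/*
 * Illustrative sketch (not part of the upstream change): the simplified
 * sctp_free_remote_addr() macro above keeps the same release pattern -- drop
 * one reference and tear the object down only when the count reaches zero --
 * while switching the route cleanup to RO_NHFREE().  The same idiom written
 * with C11 atomics instead of SCTP_DECREMENT_AND_CHECK_REFCOUNT():
 */
#include <stdatomic.h>
#include <stdlib.h>

struct remote_addr {
	atomic_uint ref_count;  /* starts at 1 when the address entry is created */
	/* ... route, selected source address, timers ... */
};

static void
remote_addr_release(struct remote_addr *net)
{
	if (net == NULL) {
		return;
	}
	/* fetch_sub returns the previous value; 1 means we held the last reference */
	if (atomic_fetch_sub(&net->ref_count, 1) == 1) {
		/* release the cached route and source address here, then the entry */
		free(net);
	}
}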
defined(__FreeBSD__) && __FreeBSD_version < 902000 -void sctp_ctlinput __P((int, struct sockaddr *, void *)); -int sctp_ctloutput __P((struct socket *, struct sockopt *)); -#ifdef INET -void sctp_input_with_port __P((struct mbuf *, int, uint16_t)); -void sctp_input __P((struct mbuf *, int)); -#endif -void sctp_pathmtu_adjustment __P((struct sctp_tcb *, uint16_t)); -#else +#if !defined(__Userspace__) #if defined(__APPLE__) && !defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION) && !defined(APPLE_ELCAPITAN) void sctp_ctlinput(int, struct sockaddr *, void *, struct ifnet * SCTP_UNUSED); #else @@ -427,18 +378,15 @@ void sctp_ctlinput(int, struct sockaddr *, void *); int sctp_ctloutput(struct socket *, struct sockopt *); #ifdef INET void sctp_input_with_port(struct mbuf *, int, uint16_t); -#if defined(__FreeBSD__) && __FreeBSD_version >= 1100020 +#if defined(__FreeBSD__) && !defined(__Userspace__) int sctp_input(struct mbuf **, int *, int); #else void sctp_input(struct mbuf *, int); #endif #endif void sctp_pathmtu_adjustment(struct sctp_tcb *, uint16_t); -#endif #else -#if defined(__Panda__) -void sctp_input(pakhandle_type i_pak); -#elif defined(__Userspace__) +#if defined(__Userspace__) void sctp_pathmtu_adjustment(struct sctp_tcb *, uint16_t); #else void sctp_input(struct mbuf *,...); @@ -446,17 +394,11 @@ void sctp_input(struct mbuf *,...); void *sctp_ctlinput(int, struct sockaddr *, void *); int sctp_ctloutput(int, struct socket *, int, int, struct mbuf **); #endif -#if defined(__FreeBSD__) && __FreeBSD_version < 902000 -void sctp_drain __P((void)); -#else void sctp_drain(void); -#endif #if defined(__Userspace__) void sctp_init(uint16_t, int (*)(void *addr, void *buffer, size_t length, uint8_t tos, uint8_t set_df), void (*)(const char *, ...), int start_threads); -#elif defined(__FreeBSD__) && __FreeBSD_version < 902000 -void sctp_init __P((void)); #elif defined(__APPLE__) && (!defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) &&!defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION)) void sctp_init(struct protosw *pp, struct domain *dp); #else @@ -464,55 +406,37 @@ void sctp_init(void); void sctp_notify(struct sctp_inpcb *, struct sctp_tcb *, struct sctp_nets *, uint8_t, uint8_t, uint16_t, uint32_t); #endif -#if !defined(__FreeBSD__) +#if !defined(__FreeBSD__) && !defined(__Userspace__) void sctp_finish(void); #endif -#if defined(__FreeBSD__) || defined(__Windows__) || defined(__Userspace__) +#if defined(__FreeBSD__) || defined(_WIN32) || defined(__Userspace__) int sctp_flush(struct socket *, int); #endif -#if defined(__FreeBSD__) && __FreeBSD_version < 902000 -int sctp_shutdown __P((struct socket *)); -#else int sctp_shutdown(struct socket *); -#endif int sctp_bindx(struct socket *, int, struct sockaddr_storage *, int, int, struct proc *); /* can't use sctp_assoc_t here */ int sctp_peeloff(struct socket *, struct socket *, int, caddr_t, int *); -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) +#if !defined(__Userspace__) int sctp_ingetaddr(struct socket *, struct sockaddr **); -#elif defined(__Panda__) -int sctp_ingetaddr(struct socket *, struct sockaddr *); #else int sctp_ingetaddr(struct socket *, struct mbuf *); #endif -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) +#if !defined(__Userspace__) int sctp_peeraddr(struct socket *, struct sockaddr **); -#elif defined(__Panda__) -int sctp_peeraddr(struct socket *, struct sockaddr *); #else int sctp_peeraddr(struct socket *, struct mbuf 
*); #endif -#if defined(__FreeBSD__) && __FreeBSD_version >= 500000 -#if __FreeBSD_version >= 700000 +#if defined(__FreeBSD__) && !defined(__Userspace__) int sctp_listen(struct socket *, int, struct thread *); -#else -int sctp_listen(struct socket *, struct thread *); -#endif -#elif defined(__Windows__) +#elif defined(_WIN32) && !defined(__Userspace__) int sctp_listen(struct socket *, int, PKTHREAD); #elif defined(__Userspace__) int sctp_listen(struct socket *, int, struct proc *); #else int sctp_listen(struct socket *, struct proc *); #endif -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) || defined(__Userspace__) int sctp_accept(struct socket *, struct sockaddr **); -#elif defined(__Panda__) -int sctp_accept(struct socket *, struct sockaddr *, int *, void *, int *); -#else -int sctp_accept(struct socket *, struct mbuf *); -#endif #endif /* _KERNEL */ diff --git a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctputil.c b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctputil.c index 001a2b37d..6cc62a80b 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctputil.c +++ b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctputil.c @@ -32,9 +32,9 @@ * THE POSSIBILITY OF SUCH DAMAGE. */ -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) #include -__FBSDID("$FreeBSD: head/sys/netinet/sctputil.c 353518 2019-10-14 20:32:11Z tuexen $"); +__FBSDID("$FreeBSD$"); #endif #include @@ -58,7 +58,7 @@ __FBSDID("$FreeBSD: head/sys/netinet/sctputil.c 353518 2019-10-14 20:32:11Z tuex #if defined(__Userspace__) #include #endif -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) #include #if defined(INET6) || defined(INET) #include @@ -71,11 +71,7 @@ __FBSDID("$FreeBSD: head/sys/netinet/sctputil.c 353518 2019-10-14 20:32:11Z tuex #endif #endif -#if defined(__APPLE__) -#define APPLE_FILE_NO 8 -#endif - -#if defined(__Windows__) +#if defined(_WIN32) && !defined(__Userspace__) #if !defined(SCTP_LOCAL_TRACE_BUF) #include "eventrace_netinet.h" #include "sctputil.tmh" /* this is the file that will be auto generated */ @@ -274,7 +270,7 @@ sctp_log_mb(struct mbuf *m, int from) sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0); if (SCTP_BUF_IS_EXTENDED(m)) { sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m); -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) /* APPLE does not use a ref_cnt, but a forward/backward ref queue */ #else sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m)); @@ -372,7 +368,7 @@ sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t #endif } -#ifndef __APPLE__ +#if !defined(__APPLE__) && !defined(__Userspace__) void sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from) { @@ -387,7 +383,7 @@ sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from) sctp_clog.x.lock.sock = (void *) NULL; } sctp_clog.x.lock.inp = (void *) inp; -#if (defined(__FreeBSD__) && __FreeBSD_version >= 503000) || (defined(__APPLE__)) +#if defined(__FreeBSD__) if (stcb) { sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx); } else { @@ -400,15 +396,11 @@ sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from) sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN; sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN; } -#if (defined(__FreeBSD__) && __FreeBSD_version <= 602000) - sctp_clog.x.lock.info_lock = mtx_owned(&SCTP_BASE_INFO(ipi_ep_mtx)); -#else sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx)); -#endif if (inp && 
(inp->sctp_socket)) { - sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx)); - sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx)); - sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx)); + sctp_clog.x.lock.sock_lock = mtx_owned(SOCK_MTX(inp->sctp_socket)); + sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(SOCKBUF_MTX(&inp->sctp_socket->so_rcv)); + sctp_clog.x.lock.socksndbuf_lock = mtx_owned(SOCKBUF_MTX(&inp->sctp_socket->so_snd)); } else { sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN; sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN; @@ -768,7 +760,6 @@ sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb, stcb->asoc.total_flight, tot_out); /* now corrective action */ TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { - tot_out = 0; TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { if ((chk->whoTo == lnet) && @@ -803,6 +794,80 @@ sctp_audit_log(uint8_t ev, uint8_t fd) #endif +/* + * The conversion from time to ticks and vice versa is done by rounding + * upwards. This way we can test in the code the time to be positive and + * know that this corresponds to a positive number of ticks. + */ + +uint32_t +sctp_msecs_to_ticks(uint32_t msecs) +{ + uint64_t temp; + uint32_t ticks; + + if (hz == 1000) { + ticks = msecs; + } else { + temp = (((uint64_t)msecs * hz) + 999) / 1000; + if (temp > UINT32_MAX) { + ticks = UINT32_MAX; + } else { + ticks = (uint32_t)temp; + } + } + return (ticks); +} + +uint32_t +sctp_ticks_to_msecs(uint32_t ticks) +{ + uint64_t temp; + uint32_t msecs; + + if (hz == 1000) { + msecs = ticks; + } else { + temp = (((uint64_t)ticks * 1000) + (hz - 1)) / hz; + if (temp > UINT32_MAX) { + msecs = UINT32_MAX; + } else { + msecs = (uint32_t)temp; + } + } + return (msecs); +} + +uint32_t +sctp_secs_to_ticks(uint32_t secs) +{ + uint64_t temp; + uint32_t ticks; + + temp = (uint64_t)secs * hz; + if (temp > UINT32_MAX) { + ticks = UINT32_MAX; + } else { + ticks = (uint32_t)temp; + } + return (ticks); +} + +uint32_t +sctp_ticks_to_secs(uint32_t ticks) +{ + uint64_t temp; + uint32_t secs; + + temp = ((uint64_t)ticks + (hz - 1)) / hz; + if (temp > UINT32_MAX) { + secs = UINT32_MAX; + } else { + secs = (uint32_t)temp; + } + return (secs); +} + /* * sctp_stop_timers_for_shutdown() should be called * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT @@ -811,19 +876,66 @@ sctp_audit_log(uint8_t ev, uint8_t fd) void sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb) { - struct sctp_association *asoc; + struct sctp_inpcb *inp; struct sctp_nets *net; - asoc = &stcb->asoc; + inp = stcb->sctp_ep; - (void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer); - (void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer); - (void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer); - (void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer); - (void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer); - TAILQ_FOREACH(net, &asoc->nets, sctp_next) { - (void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer); - (void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer); + sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL, + SCTP_FROM_SCTPUTIL + SCTP_LOC_12); + sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL, + SCTP_FROM_SCTPUTIL + SCTP_LOC_13); + sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL, + SCTP_FROM_SCTPUTIL + SCTP_LOC_14); + sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL, + SCTP_FROM_SCTPUTIL + SCTP_LOC_15); + TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { + 
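/*
 * Illustrative sketch (not part of the upstream change): the round-up behaviour
 * of the new sctp_msecs_to_ticks() helper above, rewritten with the tick rate
 * as an explicit parameter (the kernel uses the global 'hz').  Any positive
 * number of milliseconds maps to at least one tick, which is exactly what the
 * upward rounding is meant to guarantee.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t
msecs_to_ticks(uint32_t msecs, uint32_t hz)
{
	uint64_t temp;

	if (hz == 1000) {
		return (msecs);
	}
	temp = (((uint64_t)msecs * hz) + 999) / 1000;   /* ceil(msecs * hz / 1000) */
	return (temp > UINT32_MAX ? UINT32_MAX : (uint32_t)temp);
}

int
main(void)
{
	/* With hz = 100 (10 ms ticks), 1 ms still becomes 1 tick, not 0. */
	printf("%u\n", (unsigned)msecs_to_ticks(1, 100));    /* 1   */
	printf("%u\n", (unsigned)msecs_to_ticks(250, 100));  /* 25  */
	printf("%u\n", (unsigned)msecs_to_ticks(999, 1000)); /* 999 */
	return (0);
}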
sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net, + SCTP_FROM_SCTPUTIL + SCTP_LOC_16); + sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, + SCTP_FROM_SCTPUTIL + SCTP_LOC_17); + } +} + +void +sctp_stop_association_timers(struct sctp_tcb *stcb, bool stop_assoc_kill_timer) +{ + struct sctp_inpcb *inp; + struct sctp_nets *net; + + inp = stcb->sctp_ep; + sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL, + SCTP_FROM_SCTPUTIL + SCTP_LOC_18); + sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL, + SCTP_FROM_SCTPUTIL + SCTP_LOC_19); + if (stop_assoc_kill_timer) { + sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, + SCTP_FROM_SCTPUTIL + SCTP_LOC_20); + } + sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL, + SCTP_FROM_SCTPUTIL + SCTP_LOC_21); + sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL, + SCTP_FROM_SCTPUTIL + SCTP_LOC_22); + sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNGUARD, inp, stcb, NULL, + SCTP_FROM_SCTPUTIL + SCTP_LOC_23); + /* Mobility adaptation */ + sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, inp, stcb, NULL, + SCTP_FROM_SCTPUTIL + SCTP_LOC_24); + TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { + sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net, + SCTP_FROM_SCTPUTIL + SCTP_LOC_25); + sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, + SCTP_FROM_SCTPUTIL + SCTP_LOC_26); + sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, net, + SCTP_FROM_SCTPUTIL + SCTP_LOC_27); + sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net, + SCTP_FROM_SCTPUTIL + SCTP_LOC_28); + sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, net, + SCTP_FROM_SCTPUTIL + SCTP_LOC_29); + sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net, + SCTP_FROM_SCTPUTIL + SCTP_LOC_30); + sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, + SCTP_FROM_SCTPUTIL + SCTP_LOC_31); } } @@ -846,7 +958,7 @@ static uint32_t sctp_mtu_sizes[] = { 2048, 4352, 4464, - 8166, + 8168, 17912, 32000, 65532 @@ -1030,7 +1142,8 @@ sctp_map_assoc_state(int kernel_state) int sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, - uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms) + uint32_t override_tag, uint32_t initial_tsn, uint32_t vrf_id, + uint16_t o_strms) { struct sctp_association *asoc; /* @@ -1054,12 +1167,11 @@ sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, SCTP_SET_STATE(stcb, SCTP_STATE_INUSE); asoc->max_burst = inp->sctp_ep.max_burst; asoc->fr_max_burst = inp->sctp_ep.fr_max_burst; - asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]); + asoc->heart_beat_delay = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]); asoc->cookie_life = inp->sctp_ep.def_cookie_life; asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off; asoc->ecn_supported = inp->ecn_supported; asoc->prsctp_supported = inp->prsctp_supported; - asoc->idata_supported = inp->idata_supported; asoc->auth_supported = inp->auth_supported; asoc->asconf_supported = inp->asconf_supported; asoc->reconfig_supported = inp->reconfig_supported; @@ -1108,23 +1220,28 @@ sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, #endif asoc->refcnt = 0; asoc->assoc_up_sent = 0; - asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq = - sctp_select_initial_TSN(&inp->sctp_ep); - asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1; + if (override_tag) { + asoc->init_seq_number = initial_tsn; + } else { + asoc->init_seq_number = sctp_select_initial_TSN(&inp->sctp_ep); + } + 
asoc->asconf_seq_out = asoc->init_seq_number; + asoc->str_reset_seq_out = asoc->init_seq_number; + asoc->sending_seq = asoc->init_seq_number; + asoc->asconf_seq_out_acked = asoc->init_seq_number - 1; /* we are optimisitic here */ asoc->peer_supports_nat = 0; asoc->sent_queue_retran_cnt = 0; /* for CMT */ - asoc->last_net_cmt_send_started = NULL; + asoc->last_net_cmt_send_started = NULL; - /* This will need to be adjusted */ asoc->last_acked_seq = asoc->init_seq_number - 1; - asoc->advanced_peer_ack_point = asoc->last_acked_seq; - asoc->asconf_seq_in = asoc->last_acked_seq; + asoc->advanced_peer_ack_point = asoc->init_seq_number - 1; + asoc->asconf_seq_in = asoc->init_seq_number - 1; /* here we are different, we hold the next one we expect */ - asoc->str_reset_seq_in = asoc->last_acked_seq + 1; + asoc->str_reset_seq_in = asoc->init_seq_number; asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max; asoc->initial_rto = inp->sctp_ep.initial_rto; @@ -1140,7 +1257,7 @@ sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, asoc->context = inp->sctp_context; asoc->local_strreset_support = inp->local_strreset_support; asoc->def_send = inp->def_send; - asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]); + asoc->delayed_ack = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]); asoc->sack_freq = inp->sctp_ep.sctp_sack_freq; asoc->pr_sctp_cnt = 0; asoc->total_output_queue_size = 0; @@ -1222,9 +1339,8 @@ sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, * that were dropped must be notified to the upper layer as * failed to send. */ - asoc->strmout[i].next_mid_ordered = 0; - asoc->strmout[i].next_mid_unordered = 0; TAILQ_INIT(&asoc->strmout[i].outqueue); + asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL); asoc->strmout[i].chunks_on_queues = 0; #if defined(SCTP_DETAILED_STR_STATS) for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) { @@ -1235,10 +1351,11 @@ sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, asoc->strmout[i].abandoned_sent[0] = 0; asoc->strmout[i].abandoned_unsent[0] = 0; #endif + asoc->strmout[i].next_mid_ordered = 0; + asoc->strmout[i].next_mid_unordered = 0; asoc->strmout[i].sid = i; asoc->strmout[i].last_msg_incomplete = 0; asoc->strmout[i].state = SCTP_STREAM_OPENING; - asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL); } asoc->ss_functions.sctp_ss_init(stcb, asoc, 0); @@ -1368,15 +1485,20 @@ sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed) return (0); } - static void sctp_iterator_work(struct sctp_iterator *it) { +#if defined(__FreeBSD__) && !defined(__Userspace__) + struct epoch_tracker et; +#endif + struct sctp_inpcb *tinp; int iteration_count = 0; int inp_skip = 0; int first_in = 1; - struct sctp_inpcb *tinp; +#if defined(__FreeBSD__) && !defined(__Userspace__) + NET_EPOCH_ENTER(et); +#endif SCTP_INP_INFO_RLOCK(); SCTP_ITERATOR_LOCK(); sctp_it_ctl.cur_it = it; @@ -1394,6 +1516,9 @@ done_with_iterator: (*it->function_atend) (it->pointer, it->val); } SCTP_FREE(it, SCTP_M_ITER); +#if defined(__FreeBSD__) && !defined(__Userspace__) + NET_EPOCH_EXIT(et); +#endif return; } select_a_new_ep: @@ -1413,6 +1538,7 @@ select_a_new_ep: } tinp = it->inp; it->inp = LIST_NEXT(it->inp, sctp_list); + it->stcb = NULL; SCTP_INP_RUNLOCK(tinp); if (it->inp == NULL) { goto done_with_iterator; @@ -1461,7 +1587,7 @@ select_a_new_ep: /* We won't be staying here */ SCTP_INP_DECR_REF(it->inp); atomic_add_int(&it->stcb->asoc.refcnt, -1); -#if !defined(__FreeBSD__) 
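/*
 * Illustrative sketch (not part of the upstream change): the sctp_init_asoc()
 * changes above seed all of the association's sequence counters from a single
 * initial TSN, which may now be supplied by the caller (when override_tag is
 * set) instead of always being chosen at random.  Field names mirror the ones
 * in struct sctp_association; the helper itself is made up for the example.
 */
#include <stdint.h>

struct assoc_seqs {
	uint32_t init_seq_number;
	uint32_t sending_seq;
	uint32_t asconf_seq_out;
	uint32_t str_reset_seq_out;
	uint32_t asconf_seq_out_acked;
	uint32_t last_acked_seq;
	uint32_t advanced_peer_ack_point;
	uint32_t asconf_seq_in;
	uint32_t str_reset_seq_in;
};

static void
seed_assoc_seqs(struct assoc_seqs *a, uint32_t initial_tsn)
{
	a->init_seq_number = initial_tsn;
	a->sending_seq = initial_tsn;            /* next DATA TSN to send    */
	a->asconf_seq_out = initial_tsn;         /* next ASCONF serial       */
	a->str_reset_seq_out = initial_tsn;      /* next RE-CONFIG serial    */
	a->asconf_seq_out_acked = initial_tsn - 1;
	a->last_acked_seq = initial_tsn - 1;
	a->advanced_peer_ack_point = initial_tsn - 1;
	a->asconf_seq_in = initial_tsn - 1;
	a->str_reset_seq_in = initial_tsn;       /* next serial expected from the peer */
}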
+#if !(defined(__FreeBSD__) && !defined(__Userspace__)) if (sctp_it_ctl.iterator_flags & SCTP_ITERATOR_MUST_EXIT) { goto done_with_iterator; @@ -1488,6 +1614,9 @@ select_a_new_ep: atomic_add_int(&it->stcb->asoc.refcnt, -1); iteration_count = 0; } + KASSERT(it->inp == it->stcb->sctp_ep, + ("%s: stcb %p does not belong to inp %p, but inp %p", + __func__, it->stcb, it->inp, it->stcb->sctp_ep)); /* run function on this one */ (*it->function_assoc)(it->inp, it->stcb, it->pointer, it->val); @@ -1520,6 +1649,7 @@ select_a_new_ep: } else { it->inp = LIST_NEXT(it->inp, sctp_list); } + it->stcb = NULL; if (it->inp == NULL) { goto done_with_iterator; } @@ -1529,24 +1659,23 @@ select_a_new_ep: void sctp_iterator_worker(void) { - struct sctp_iterator *it, *nit; + struct sctp_iterator *it; /* This function is called with the WQ lock in place */ - sctp_it_ctl.iterator_running = 1; - TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) { + while ((it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead)) != NULL) { /* now lets work on this one */ TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr); SCTP_IPI_ITERATOR_WQ_UNLOCK(); -#if defined(__FreeBSD__) && __FreeBSD_version >= 801000 +#if defined(__FreeBSD__) && !defined(__Userspace__) CURVNET_SET(it->vn); #endif sctp_iterator_work(it); -#if defined(__FreeBSD__) && __FreeBSD_version >= 801000 +#if defined(__FreeBSD__) && !defined(__Userspace__) CURVNET_RESTORE(); #endif SCTP_IPI_ITERATOR_WQ_LOCK(); -#if !defined(__FreeBSD__) +#if !defined(__FreeBSD__) && !defined(__Userspace__) if (sctp_it_ctl.iterator_flags & SCTP_ITERATOR_MUST_EXIT) { break; } @@ -1557,7 +1686,6 @@ sctp_iterator_worker(void) return; } - static void sctp_handle_addr_wq(void) { @@ -1612,31 +1740,67 @@ sctp_handle_addr_wq(void) } } +/*- + * The following table shows which pointers for the inp, stcb, or net are + * stored for each timer after it was started. 
+ * + *|Name |Timer |inp |stcb|net | + *|-----------------------------|-----------------------------|----|----|----| + *|SCTP_TIMER_TYPE_SEND |net->rxt_timer |Yes |Yes |Yes | + *|SCTP_TIMER_TYPE_INIT |net->rxt_timer |Yes |Yes |Yes | + *|SCTP_TIMER_TYPE_RECV |stcb->asoc.dack_timer |Yes |Yes |No | + *|SCTP_TIMER_TYPE_SHUTDOWN |net->rxt_timer |Yes |Yes |Yes | + *|SCTP_TIMER_TYPE_HEARTBEAT |net->hb_timer |Yes |Yes |Yes | + *|SCTP_TIMER_TYPE_COOKIE |net->rxt_timer |Yes |Yes |Yes | + *|SCTP_TIMER_TYPE_NEWCOOKIE |inp->sctp_ep.signature_change|Yes |No |No | + *|SCTP_TIMER_TYPE_PATHMTURAISE |net->pmtu_timer |Yes |Yes |Yes | + *|SCTP_TIMER_TYPE_SHUTDOWNACK |net->rxt_timer |Yes |Yes |Yes | + *|SCTP_TIMER_TYPE_ASCONF |stcb->asoc.asconf_timer |Yes |Yes |Yes | + *|SCTP_TIMER_TYPE_SHUTDOWNGUARD|stcb->asoc.shut_guard_timer |Yes |Yes |No | + *|SCTP_TIMER_TYPE_AUTOCLOSE |stcb->asoc.autoclose_timer |Yes |Yes |No | + *|SCTP_TIMER_TYPE_STRRESET |stcb->asoc.strreset_timer |Yes |Yes |No | + *|SCTP_TIMER_TYPE_INPKILL |inp->sctp_ep.signature_change|Yes |No |No | + *|SCTP_TIMER_TYPE_ASOCKILL |stcb->asoc.strreset_timer |Yes |Yes |No | + *|SCTP_TIMER_TYPE_ADDR_WQ |SCTP_BASE_INFO(addr_wq_timer)|No |No |No | + *|SCTP_TIMER_TYPE_PRIM_DELETED |stcb->asoc.delete_prim_timer |Yes |Yes |No | + */ + void sctp_timeout_handler(void *t) { +#if defined(__FreeBSD__) && !defined(__Userspace__) + struct epoch_tracker et; +#endif + struct timeval tv; struct sctp_inpcb *inp; struct sctp_tcb *stcb; struct sctp_nets *net; struct sctp_timer *tmr; struct mbuf *op_err; -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) struct socket *so; #endif #if defined(__Userspace__) struct socket *upcall_socket = NULL; #endif - int did_output; int type; + int i, secret; + bool did_output, released_asoc_reference; + /* + * If inp, stcb or net are not NULL, then references to these were + * added when the timer was started, and must be released before this + * function returns. + */ tmr = (struct sctp_timer *)t; inp = (struct sctp_inpcb *)tmr->ep; stcb = (struct sctp_tcb *)tmr->tcb; net = (struct sctp_nets *)tmr->net; -#if defined(__FreeBSD__) && __FreeBSD_version >= 801000 +#if defined(__FreeBSD__) && !defined(__Userspace__) CURVNET_SET((struct vnet *)tmr->vnet); + NET_EPOCH_ENTER(et); #endif - did_output = 1; + released_asoc_reference = false; #ifdef SCTP_AUDITING_ENABLED sctp_audit_log(0xF0, (uint8_t) tmr->type); @@ -1644,123 +1808,68 @@ sctp_timeout_handler(void *t) #endif /* sanity checks... 
*/ - if (tmr->self != (void *)tmr) { - /* - * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n", - * (void *)tmr); - */ -#if defined(__FreeBSD__) && __FreeBSD_version >= 801000 - CURVNET_RESTORE(); -#endif - return; - } + KASSERT(tmr->self == NULL || tmr->self == tmr, + ("sctp_timeout_handler: tmr->self corrupted")); + KASSERT(SCTP_IS_TIMER_TYPE_VALID(tmr->type), + ("sctp_timeout_handler: invalid timer type %d", tmr->type)); + type = tmr->type; + KASSERT(stcb == NULL || stcb->sctp_ep == inp, + ("sctp_timeout_handler of type %d: inp = %p, stcb->sctp_ep %p", + type, stcb, stcb->sctp_ep)); tmr->stopped_from = 0xa001; - if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) { - /* - * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n", - * tmr->type); - */ -#if defined(__FreeBSD__) && __FreeBSD_version >= 801000 - CURVNET_RESTORE(); -#endif - return; + if ((stcb != NULL) && (stcb->asoc.state == SCTP_STATE_EMPTY)) { + SCTPDBG(SCTP_DEBUG_TIMER2, + "Timer type %d handler exiting due to CLOSED association.\n", + type); + goto out_decr; } tmr->stopped_from = 0xa002; - if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) { -#if defined(__FreeBSD__) && __FreeBSD_version >= 801000 - CURVNET_RESTORE(); -#endif - return; - } - /* if this is an iterator timeout, get the struct and clear inp */ - tmr->stopped_from = 0xa003; - if (inp) { - SCTP_INP_INCR_REF(inp); - if ((inp->sctp_socket == NULL) && - ((tmr->type != SCTP_TIMER_TYPE_INPKILL) && - (tmr->type != SCTP_TIMER_TYPE_INIT) && - (tmr->type != SCTP_TIMER_TYPE_SEND) && - (tmr->type != SCTP_TIMER_TYPE_RECV) && - (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) && - (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) && - (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) && - (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) && - (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))) { - SCTP_INP_DECR_REF(inp); -#if defined(__FreeBSD__) && __FreeBSD_version >= 801000 - CURVNET_RESTORE(); -#endif - return; - } - } - tmr->stopped_from = 0xa004; - if (stcb) { - atomic_add_int(&stcb->asoc.refcnt, 1); - if (stcb->asoc.state == 0) { - atomic_add_int(&stcb->asoc.refcnt, -1); - if (inp) { - SCTP_INP_DECR_REF(inp); - } -#if defined(__FreeBSD__) && __FreeBSD_version >= 801000 - CURVNET_RESTORE(); -#endif - return; - } - } - type = tmr->type; - tmr->stopped_from = 0xa005; - SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", type); + SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d goes off.\n", type); if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) { - if (inp) { - SCTP_INP_DECR_REF(inp); - } - if (stcb) { - atomic_add_int(&stcb->asoc.refcnt, -1); - } -#if defined(__FreeBSD__) && __FreeBSD_version >= 801000 - CURVNET_RESTORE(); -#endif - return; + SCTPDBG(SCTP_DEBUG_TIMER2, + "Timer type %d handler exiting due to not being active.\n", + type); + goto out_decr; } - tmr->stopped_from = 0xa006; + tmr->stopped_from = 0xa003; if (stcb) { SCTP_TCB_LOCK(stcb); + /* + * Release reference so that association can be freed if + * necessary below. + * This is safe now that we have acquired the lock. 
+ */ atomic_add_int(&stcb->asoc.refcnt, -1); + released_asoc_reference = true; if ((type != SCTP_TIMER_TYPE_ASOCKILL) && - ((stcb->asoc.state == 0) || + ((stcb->asoc.state == SCTP_STATE_EMPTY) || (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) { - SCTP_TCB_UNLOCK(stcb); - if (inp) { - SCTP_INP_DECR_REF(inp); - } -#if defined(__FreeBSD__) && __FreeBSD_version >= 801000 - CURVNET_RESTORE(); -#endif - return; + SCTPDBG(SCTP_DEBUG_TIMER2, + "Timer type %d handler exiting due to CLOSED association.\n", + type); + goto out; } } else if (inp != NULL) { - if (type != SCTP_TIMER_TYPE_INPKILL) { - SCTP_INP_WLOCK(inp); - } + SCTP_INP_WLOCK(inp); } else { SCTP_WQ_ADDR_LOCK(); } - /* record in stopped what t-o occurred */ - tmr->stopped_from = type; + /* Record in stopped_from which timeout occurred. */ + tmr->stopped_from = type; /* mark as being serviced now */ if (SCTP_OS_TIMER_PENDING(&tmr->timer)) { /* * Callout has been rescheduled. */ - goto get_out; + goto out; } if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) { /* * Not active, so no action. */ - goto get_out; + goto out; } SCTP_OS_TIMER_DEACTIVATE(&tmr->timer); @@ -1776,13 +1885,10 @@ sctp_timeout_handler(void *t) #endif /* call the handler for the appropriate timer type */ switch (type) { - case SCTP_TIMER_TYPE_ADDR_WQ: - sctp_handle_addr_wq(); - break; case SCTP_TIMER_TYPE_SEND: - if ((stcb == NULL) || (inp == NULL)) { - break; - } + KASSERT(inp != NULL && stcb != NULL && net != NULL, + ("timeout of type %d: inp = %p, stcb = %p, net = %p", + type, inp, stcb, net)); SCTP_STAT_INCR(sctps_timodata); stcb->asoc.timodata++; stcb->asoc.num_send_timers_up--; @@ -1800,65 +1906,72 @@ sctp_timeout_handler(void *t) sctp_auditing(4, inp, stcb, net); #endif sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); + did_output = true; if ((stcb->asoc.num_send_timers_up == 0) && (stcb->asoc.sent_queue_cnt > 0)) { struct sctp_tmit_chunk *chk; /* - * safeguard. If there on some on the sent queue + * Safeguard. If there on some on the sent queue * somewhere but no timers running something is * wrong... so we start a timer on the first chunk * on the send queue on whatever net it is sent to. 
*/ - chk = TAILQ_FIRST(&stcb->asoc.sent_queue); - sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, - chk->whoTo); + TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { + if (chk->whoTo != NULL) { + break; + } + } + if (chk != NULL) { + sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo); + } } break; case SCTP_TIMER_TYPE_INIT: - if ((stcb == NULL) || (inp == NULL)) { - break; - } + KASSERT(inp != NULL && stcb != NULL && net != NULL, + ("timeout of type %d: inp = %p, stcb = %p, net = %p", + type, inp, stcb, net)); SCTP_STAT_INCR(sctps_timoinit); stcb->asoc.timoinit++; if (sctp_t1init_timer(inp, stcb, net)) { /* no need to unlock on tcb its gone */ goto out_decr; } - /* We do output but not here */ - did_output = 0; + did_output = false; break; case SCTP_TIMER_TYPE_RECV: - if ((stcb == NULL) || (inp == NULL)) { - break; - } + KASSERT(inp != NULL && stcb != NULL && net == NULL, + ("timeout of type %d: inp = %p, stcb = %p, net = %p", + type, inp, stcb, net)); SCTP_STAT_INCR(sctps_timosack); stcb->asoc.timosack++; sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); #ifdef SCTP_AUDITING_ENABLED - sctp_auditing(4, inp, stcb, net); + sctp_auditing(4, inp, stcb, NULL); #endif sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED); + did_output = true; break; case SCTP_TIMER_TYPE_SHUTDOWN: - if ((stcb == NULL) || (inp == NULL)) { - break; - } + KASSERT(inp != NULL && stcb != NULL && net != NULL, + ("timeout of type %d: inp = %p, stcb = %p, net = %p", + type, inp, stcb, net)); + SCTP_STAT_INCR(sctps_timoshutdown); + stcb->asoc.timoshutdown++; if (sctp_shutdown_timer(inp, stcb, net)) { /* no need to unlock on tcb its gone */ goto out_decr; } - SCTP_STAT_INCR(sctps_timoshutdown); - stcb->asoc.timoshutdown++; #ifdef SCTP_AUDITING_ENABLED sctp_auditing(4, inp, stcb, net); #endif sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED); + did_output = true; break; case SCTP_TIMER_TYPE_HEARTBEAT: - if ((stcb == NULL) || (inp == NULL) || (net == NULL)) { - break; - } + KASSERT(inp != NULL && stcb != NULL && net != NULL, + ("timeout of type %d: inp = %p, stcb = %p, net = %p", + type, inp, stcb, net)); SCTP_STAT_INCR(sctps_timoheartbeat); stcb->asoc.timoheartbeat++; if (sctp_heartbeat_timer(inp, stcb, net)) { @@ -1871,19 +1984,21 @@ sctp_timeout_handler(void *t) if (!(net->dest_state & SCTP_ADDR_NOHB)) { sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net); sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED); + did_output = true; + } else { + did_output = false; } break; case SCTP_TIMER_TYPE_COOKIE: - if ((stcb == NULL) || (inp == NULL)) { - break; - } - + KASSERT(inp != NULL && stcb != NULL && net != NULL, + ("timeout of type %d: inp = %p, stcb = %p, net = %p", + type, inp, stcb, net)); + SCTP_STAT_INCR(sctps_timocookie); + stcb->asoc.timocookie++; if (sctp_cookie_timer(inp, stcb, net)) { /* no need to unlock on tcb its gone */ goto out_decr; } - SCTP_STAT_INCR(sctps_timocookie); - stcb->asoc.timocookie++; #ifdef SCTP_AUDITING_ENABLED sctp_auditing(4, inp, stcb, net); #endif @@ -1892,45 +2007,42 @@ sctp_timeout_handler(void *t) * respect to where from in chunk_output. 
*/ sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); + did_output = true; break; case SCTP_TIMER_TYPE_NEWCOOKIE: - { - struct timeval tv; - int i, secret; - if (inp == NULL) { - break; - } - SCTP_STAT_INCR(sctps_timosecret); - (void)SCTP_GETTIME_TIMEVAL(&tv); - inp->sctp_ep.time_of_secret_change = tv.tv_sec; - inp->sctp_ep.last_secret_number = - inp->sctp_ep.current_secret_number; - inp->sctp_ep.current_secret_number++; - if (inp->sctp_ep.current_secret_number >= - SCTP_HOW_MANY_SECRETS) { - inp->sctp_ep.current_secret_number = 0; - } - secret = (int)inp->sctp_ep.current_secret_number; - for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) { - inp->sctp_ep.secret_key[secret][i] = - sctp_select_initial_TSN(&inp->sctp_ep); - } - sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net); + KASSERT(inp != NULL && stcb == NULL && net == NULL, + ("timeout of type %d: inp = %p, stcb = %p, net = %p", + type, inp, stcb, net)); + SCTP_STAT_INCR(sctps_timosecret); + (void)SCTP_GETTIME_TIMEVAL(&tv); + inp->sctp_ep.time_of_secret_change = (unsigned int)tv.tv_sec; + inp->sctp_ep.last_secret_number = + inp->sctp_ep.current_secret_number; + inp->sctp_ep.current_secret_number++; + if (inp->sctp_ep.current_secret_number >= + SCTP_HOW_MANY_SECRETS) { + inp->sctp_ep.current_secret_number = 0; } - did_output = 0; + secret = (int)inp->sctp_ep.current_secret_number; + for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) { + inp->sctp_ep.secret_key[secret][i] = + sctp_select_initial_TSN(&inp->sctp_ep); + } + sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL); + did_output = false; break; case SCTP_TIMER_TYPE_PATHMTURAISE: - if ((stcb == NULL) || (inp == NULL)) { - break; - } + KASSERT(inp != NULL && stcb != NULL && net != NULL, + ("timeout of type %d: inp = %p, stcb = %p, net = %p", + type, inp, stcb, net)); SCTP_STAT_INCR(sctps_timopathmtu); sctp_pathmtu_timer(inp, stcb, net); - did_output = 0; + did_output = false; break; case SCTP_TIMER_TYPE_SHUTDOWNACK: - if ((stcb == NULL) || (inp == NULL)) { - break; - } + KASSERT(inp != NULL && stcb != NULL && net != NULL, + ("timeout of type %d: inp = %p, stcb = %p, net = %p", + type, inp, stcb, net)); if (sctp_shutdownack_timer(inp, stcb, net)) { /* no need to unlock on tcb its gone */ goto out_decr; @@ -1941,70 +2053,86 @@ sctp_timeout_handler(void *t) sctp_auditing(4, inp, stcb, net); #endif sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED); - break; - case SCTP_TIMER_TYPE_SHUTDOWNGUARD: - if ((stcb == NULL) || (inp == NULL)) { - break; - } - SCTP_STAT_INCR(sctps_timoshutdownguard); - op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), - "Shutdown guard timer expired"); - sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); - /* no need to unlock on tcb its gone */ - goto out_decr; - - case SCTP_TIMER_TYPE_STRRESET: - if ((stcb == NULL) || (inp == NULL)) { - break; - } - if (sctp_strreset_timer(inp, stcb, net)) { - /* no need to unlock on tcb its gone */ - goto out_decr; - } - SCTP_STAT_INCR(sctps_timostrmrst); - sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED); + did_output = true; break; case SCTP_TIMER_TYPE_ASCONF: - if ((stcb == NULL) || (inp == NULL)) { - break; - } + KASSERT(inp != NULL && stcb != NULL && net != NULL, + ("timeout of type %d: inp = %p, stcb = %p, net = %p", + type, inp, stcb, net)); + SCTP_STAT_INCR(sctps_timoasconf); if (sctp_asconf_timer(inp, stcb, net)) { /* no need to unlock on tcb its gone */ goto out_decr; } - 
SCTP_STAT_INCR(sctps_timoasconf); #ifdef SCTP_AUDITING_ENABLED sctp_auditing(4, inp, stcb, net); #endif sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED); + did_output = true; break; - case SCTP_TIMER_TYPE_PRIM_DELETED: - if ((stcb == NULL) || (inp == NULL)) { - break; - } - sctp_delete_prim_timer(inp, stcb, net); - SCTP_STAT_INCR(sctps_timodelprim); - break; - + case SCTP_TIMER_TYPE_SHUTDOWNGUARD: + KASSERT(inp != NULL && stcb != NULL && net == NULL, + ("timeout of type %d: inp = %p, stcb = %p, net = %p", + type, inp, stcb, net)); + SCTP_STAT_INCR(sctps_timoshutdownguard); + op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), + "Shutdown guard timer expired"); + sctp_abort_an_association(inp, stcb, op_err, true, SCTP_SO_NOT_LOCKED); + /* no need to unlock on tcb its gone */ + goto out_decr; case SCTP_TIMER_TYPE_AUTOCLOSE: - if ((stcb == NULL) || (inp == NULL)) { - break; - } + KASSERT(inp != NULL && stcb != NULL && net == NULL, + ("timeout of type %d: inp = %p, stcb = %p, net = %p", + type, inp, stcb, net)); SCTP_STAT_INCR(sctps_timoautoclose); - sctp_autoclose_timer(inp, stcb, net); + sctp_autoclose_timer(inp, stcb); sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED); - did_output = 0; + did_output = true; break; - case SCTP_TIMER_TYPE_ASOCKILL: - if ((stcb == NULL) || (inp == NULL)) { - break; + case SCTP_TIMER_TYPE_STRRESET: + KASSERT(inp != NULL && stcb != NULL && net == NULL, + ("timeout of type %d: inp = %p, stcb = %p, net = %p", + type, inp, stcb, net)); + SCTP_STAT_INCR(sctps_timostrmrst); + if (sctp_strreset_timer(inp, stcb)) { + /* no need to unlock on tcb its gone */ + goto out_decr; } + sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED); + did_output = true; + break; + case SCTP_TIMER_TYPE_INPKILL: + KASSERT(inp != NULL && stcb == NULL && net == NULL, + ("timeout of type %d: inp = %p, stcb = %p, net = %p", + type, inp, stcb, net)); + SCTP_STAT_INCR(sctps_timoinpkill); + /* + * special case, take away our increment since WE are the + * killer + */ + sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, + SCTP_FROM_SCTPUTIL + SCTP_LOC_3); +#if defined(__APPLE__) && !defined(__Userspace__) + SCTP_SOCKET_LOCK(SCTP_INP_SO(inp), 1); +#endif + SCTP_INP_DECR_REF(inp); + SCTP_INP_WUNLOCK(inp); + sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, + SCTP_CALLED_FROM_INPKILL_TIMER); +#if defined(__APPLE__) && !defined(__Userspace__) + SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 1); +#endif + inp = NULL; + goto out_decr; + case SCTP_TIMER_TYPE_ASOCKILL: + KASSERT(inp != NULL && stcb != NULL && net == NULL, + ("timeout of type %d: inp = %p, stcb = %p, net = %p", + type, inp, stcb, net)); SCTP_STAT_INCR(sctps_timoassockill); /* Can we free it yet? 
*/ - SCTP_INP_DECR_REF(inp); sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1); -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) so = SCTP_INP_SO(inp); atomic_add_int(&stcb->asoc.refcnt, 1); SCTP_TCB_UNLOCK(stcb); @@ -2014,7 +2142,7 @@ sctp_timeout_handler(void *t) #endif (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2); -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_SOCKET_UNLOCK(so, 1); #endif /* @@ -2022,40 +2150,35 @@ sctp_timeout_handler(void *t) * duplicate unlock or unlock of a free mtx :-0 */ stcb = NULL; - goto out_no_decr; - case SCTP_TIMER_TYPE_INPKILL: - SCTP_STAT_INCR(sctps_timoinpkill); - if (inp == NULL) { - break; - } - /* - * special case, take away our increment since WE are the - * killer - */ - SCTP_INP_DECR_REF(inp); - sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, - SCTP_FROM_SCTPUTIL + SCTP_LOC_3); -#if defined(__APPLE__) - SCTP_SOCKET_LOCK(SCTP_INP_SO(inp), 1); -#endif - sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, - SCTP_CALLED_FROM_INPKILL_TIMER); -#if defined(__APPLE__) - SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 1); -#endif - inp = NULL; - goto out_no_decr; - default: - SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n", - type); + goto out_decr; + case SCTP_TIMER_TYPE_ADDR_WQ: + KASSERT(inp == NULL && stcb == NULL && net == NULL, + ("timeout of type %d: inp = %p, stcb = %p, net = %p", + type, inp, stcb, net)); + sctp_handle_addr_wq(); + did_output = true; break; + case SCTP_TIMER_TYPE_PRIM_DELETED: + KASSERT(inp != NULL && stcb != NULL && net == NULL, + ("timeout of type %d: inp = %p, stcb = %p, net = %p", + type, inp, stcb, net)); + SCTP_STAT_INCR(sctps_timodelprim); + sctp_delete_prim_timer(inp, stcb); + did_output = false; + break; + default: +#ifdef INVARIANTS + panic("Unknown timer type %d", type); +#else + goto out; +#endif } #ifdef SCTP_AUDITING_ENABLED sctp_audit_log(0xF1, (uint8_t) type); - if (inp) + if (inp != NULL) sctp_auditing(5, inp, stcb, net); #endif - if ((did_output) && stcb) { + if (did_output && (stcb != NULL)) { /* * Now we need to clean up the control chunk chain if an * ECNE is on it. It must be marked as UNSENT again so next @@ -2065,8 +2188,8 @@ sctp_timeout_handler(void *t) */ sctp_fix_ecn_echo(&stcb->asoc); } -get_out: - if (stcb) { +out: + if (stcb != NULL) { SCTP_TCB_UNLOCK(stcb); } else if (inp != NULL) { SCTP_INP_WUNLOCK(inp); @@ -2086,162 +2209,359 @@ out_decr: sorele(upcall_socket); } #endif - if (inp) { + /* These reference counts were incremented in sctp_timer_start(). */ + if (inp != NULL) { SCTP_INP_DECR_REF(inp); } - -out_no_decr: - SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type = %d)\n", type); -#if defined(__FreeBSD__) && __FreeBSD_version >= 801000 + if ((stcb != NULL) && !released_asoc_reference) { + atomic_add_int(&stcb->asoc.refcnt, -1); + } + if (net != NULL) { + sctp_free_remote_addr(net); + } + SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d handler finished.\n", type); +#if defined(__FreeBSD__) && !defined(__Userspace__) CURVNET_RESTORE(); + NET_EPOCH_EXIT(et); #endif } +/*- + * The following table shows which parameters must be provided + * when calling sctp_timer_start(). For parameters not being + * provided, NULL must be used. 
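+ * Illustrative usage (hypothetical calls, derived only from the
+ * table below): a retransmission timer for a destination needs all
+ * three arguments,
+ *   sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
+ * while the global address work queue timer takes none,
+ *   sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, NULL, NULL, NULL);
+ * and the caller must hold the lock asserted at the top of the
+ * function (TCB lock, INP write lock, or WQ address lock,
+ * respectively).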
+ * + * |Name |inp |stcb|net | + * |-----------------------------|----|----|----| + * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes | + * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes | + * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No | + * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes | + * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes | + * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes | + * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No | + * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes | + * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes | + * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |Yes | + * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No | + * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No | + * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |Yes | + * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No | + * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No | + * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No | + * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No | + * + */ + void sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net) { - uint32_t to_ticks; struct sctp_timer *tmr; + uint32_t to_ticks; + uint32_t rndval, jitter; - if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) - return; - + KASSERT(stcb == NULL || stcb->sctp_ep == inp, + ("sctp_timer_start of type %d: inp = %p, stcb->sctp_ep %p", + t_type, stcb, stcb->sctp_ep)); tmr = NULL; - if (stcb) { + if (stcb != NULL) { SCTP_TCB_LOCK_ASSERT(stcb); + } else if (inp != NULL) { + SCTP_INP_WLOCK_ASSERT(inp); + } else { + SCTP_WQ_ADDR_LOCK_ASSERT(); + } + if (stcb != NULL) { + /* Don't restart timer on association that's about to be killed. */ + if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) && + (t_type != SCTP_TIMER_TYPE_ASOCKILL)) { + SCTPDBG(SCTP_DEBUG_TIMER2, + "Timer type %d not started: inp=%p, stcb=%p, net=%p (stcb deleted).\n", + t_type, inp, stcb, net); + return; + } + /* Don't restart timer on net that's been removed. */ + if (net != NULL && (net->dest_state & SCTP_ADDR_BEING_DELETED)) { + SCTPDBG(SCTP_DEBUG_TIMER2, + "Timer type %d not started: inp=%p, stcb=%p, net=%p (net deleted).\n", + t_type, inp, stcb, net); + return; + } } switch (t_type) { - case SCTP_TIMER_TYPE_ADDR_WQ: - /* Only 1 tick away :-) */ - tmr = &SCTP_BASE_INFO(addr_wq_timer); - to_ticks = SCTP_ADDRESS_TICK_DELAY; - break; case SCTP_TIMER_TYPE_SEND: - /* Here we use the RTO timer */ - { - int rto_val; - - if ((stcb == NULL) || (net == NULL)) { - return; - } - tmr = &net->rxt_timer; - if (net->RTO == 0) { - rto_val = stcb->asoc.initial_rto; - } else { - rto_val = net->RTO; - } - to_ticks = MSEC_TO_TICKS(rto_val); + /* Here we use the RTO timer. */ + if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { +#ifdef INVARIANTS + panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", + t_type, inp, stcb, net); +#else + return; +#endif + } + tmr = &net->rxt_timer; + if (net->RTO == 0) { + to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); + } else { + to_ticks = sctp_msecs_to_ticks(net->RTO); } break; case SCTP_TIMER_TYPE_INIT: /* * Here we use the INIT timer default usually about 1 - * minute. + * second. 
*/ - if ((stcb == NULL) || (net == NULL)) { + if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { +#ifdef INVARIANTS + panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", + t_type, inp, stcb, net); +#else return; +#endif } tmr = &net->rxt_timer; if (net->RTO == 0) { - to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); + to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); } else { - to_ticks = MSEC_TO_TICKS(net->RTO); + to_ticks = sctp_msecs_to_ticks(net->RTO); } break; case SCTP_TIMER_TYPE_RECV: /* - * Here we use the Delayed-Ack timer value from the inp + * Here we use the Delayed-Ack timer value from the inp, * ususually about 200ms. */ - if (stcb == NULL) { + if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { +#ifdef INVARIANTS + panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", + t_type, inp, stcb, net); +#else return; +#endif } tmr = &stcb->asoc.dack_timer; - to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack); + to_ticks = sctp_msecs_to_ticks(stcb->asoc.delayed_ack); break; case SCTP_TIMER_TYPE_SHUTDOWN: /* Here we use the RTO of the destination. */ - if ((stcb == NULL) || (net == NULL)) { + if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { +#ifdef INVARIANTS + panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", + t_type, inp, stcb, net); +#else return; - } - if (net->RTO == 0) { - to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); - } else { - to_ticks = MSEC_TO_TICKS(net->RTO); +#endif } tmr = &net->rxt_timer; + if (net->RTO == 0) { + to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); + } else { + to_ticks = sctp_msecs_to_ticks(net->RTO); + } break; case SCTP_TIMER_TYPE_HEARTBEAT: /* - * the net is used here so that we can add in the RTO. Even + * The net is used here so that we can add in the RTO. Even * though we use a different timer. We also add the HB timer * PLUS a random jitter. */ - if ((stcb == NULL) || (net == NULL)) { + if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { +#ifdef INVARIANTS + panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", + t_type, inp, stcb, net); +#else return; - } else { - uint32_t rndval; - uint32_t jitter; - - if ((net->dest_state & SCTP_ADDR_NOHB) && - !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) { - return; - } - if (net->RTO == 0) { - to_ticks = stcb->asoc.initial_rto; - } else { - to_ticks = net->RTO; - } - rndval = sctp_select_initial_TSN(&inp->sctp_ep); - jitter = rndval % to_ticks; - if (jitter >= (to_ticks >> 1)) { - to_ticks = to_ticks + (jitter - (to_ticks >> 1)); - } else { - to_ticks = to_ticks - jitter; - } - if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) && - !(net->dest_state & SCTP_ADDR_PF)) { - to_ticks += net->heart_beat_delay; - } - /* - * Now we must convert the to_ticks that are now in - * ms to ticks. 
- */ - to_ticks = MSEC_TO_TICKS(to_ticks); - tmr = &net->hb_timer; +#endif } + if ((net->dest_state & SCTP_ADDR_NOHB) && + !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) { + SCTPDBG(SCTP_DEBUG_TIMER2, + "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n", + t_type, inp, stcb, net); + return; + } + tmr = &net->hb_timer; + if (net->RTO == 0) { + to_ticks = stcb->asoc.initial_rto; + } else { + to_ticks = net->RTO; + } + rndval = sctp_select_initial_TSN(&inp->sctp_ep); + jitter = rndval % to_ticks; + if (to_ticks > 1) { + to_ticks >>= 1; + } + if (jitter < (UINT32_MAX - to_ticks)) { + to_ticks += jitter; + } else { + to_ticks = UINT32_MAX; + } + if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) && + !(net->dest_state & SCTP_ADDR_PF)) { + if (net->heart_beat_delay < (UINT32_MAX - to_ticks)) { + to_ticks += net->heart_beat_delay; + } else { + to_ticks = UINT32_MAX; + } + } + /* + * Now we must convert the to_ticks that are now in + * ms to ticks. + */ + to_ticks = sctp_msecs_to_ticks(to_ticks); break; case SCTP_TIMER_TYPE_COOKIE: /* * Here we can use the RTO timer from the network since one - * RTT was compelete. If a retran happened then we will be - * using the RTO initial value. + * RTT was complete. If a retransmission happened then we will + * be using the RTO initial value. */ - if ((stcb == NULL) || (net == NULL)) { + if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { +#ifdef INVARIANTS + panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", + t_type, inp, stcb, net); +#else return; - } - if (net->RTO == 0) { - to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); - } else { - to_ticks = MSEC_TO_TICKS(net->RTO); +#endif } tmr = &net->rxt_timer; + if (net->RTO == 0) { + to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); + } else { + to_ticks = sctp_msecs_to_ticks(net->RTO); + } break; case SCTP_TIMER_TYPE_NEWCOOKIE: /* - * nothing needed but the endpoint here ususually about 60 + * Nothing needed but the endpoint here ususually about 60 * minutes. */ + if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { +#ifdef INVARIANTS + panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", + t_type, inp, stcb, net); +#else + return; +#endif + } tmr = &inp->sctp_ep.signature_change; to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE]; break; - case SCTP_TIMER_TYPE_ASOCKILL: - if (stcb == NULL) { + case SCTP_TIMER_TYPE_PATHMTURAISE: + /* + * Here we use the value found in the EP for PMTUD, ususually + * about 10 minutes. + */ + if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { +#ifdef INVARIANTS + panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", + t_type, inp, stcb, net); +#else + return; +#endif + } + if (net->dest_state & SCTP_ADDR_NO_PMTUD) { + SCTPDBG(SCTP_DEBUG_TIMER2, + "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n", + t_type, inp, stcb, net); return; } + tmr = &net->pmtu_timer; + to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU]; + break; + case SCTP_TIMER_TYPE_SHUTDOWNACK: + /* Here we use the RTO of the destination. */ + if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { +#ifdef INVARIANTS + panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", + t_type, inp, stcb, net); +#else + return; +#endif + } + tmr = &net->rxt_timer; + if (net->RTO == 0) { + to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); + } else { + to_ticks = sctp_msecs_to_ticks(net->RTO); + } + break; + case SCTP_TIMER_TYPE_ASCONF: + /* + * Here the timer comes from the stcb but its value is from + * the net's RTO. 
+ */ + if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { +#ifdef INVARIANTS + panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", + t_type, inp, stcb, net); +#else + return; +#endif + } + tmr = &stcb->asoc.asconf_timer; + if (net->RTO == 0) { + to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); + } else { + to_ticks = sctp_msecs_to_ticks(net->RTO); + } + break; + case SCTP_TIMER_TYPE_SHUTDOWNGUARD: + /* + * Here we use the endpoints shutdown guard timer usually + * about 3 minutes. + */ + if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { +#ifdef INVARIANTS + panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", + t_type, inp, stcb, net); +#else + return; +#endif + } + tmr = &stcb->asoc.shut_guard_timer; + if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) { + if (stcb->asoc.maxrto < UINT32_MAX / 5) { + to_ticks = sctp_msecs_to_ticks(5 * stcb->asoc.maxrto); + } else { + to_ticks = sctp_msecs_to_ticks(UINT32_MAX); + } + } else { + to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN]; + } + break; + case SCTP_TIMER_TYPE_AUTOCLOSE: + if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { +#ifdef INVARIANTS + panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", + t_type, inp, stcb, net); +#else + return; +#endif + } + tmr = &stcb->asoc.autoclose_timer; + to_ticks = stcb->asoc.sctp_autoclose_ticks; + break; + case SCTP_TIMER_TYPE_STRRESET: + /* + * Here the timer comes from the stcb but its value is from + * the net's RTO. + */ + if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { +#ifdef INVARIANTS + panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", + t_type, inp, stcb, net); +#else + return; +#endif + } tmr = &stcb->asoc.strreset_timer; - to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT); + if (net->RTO == 0) { + to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); + } else { + to_ticks = sctp_msecs_to_ticks(net->RTO); + } break; case SCTP_TIMER_TYPE_INPKILL: /* @@ -2249,120 +2569,74 @@ sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, * timer since that has stopped and we are in the GONE * state. */ + if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { +#ifdef INVARIANTS + panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", + t_type, inp, stcb, net); +#else + return; +#endif + } tmr = &inp->sctp_ep.signature_change; - to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT); + to_ticks = sctp_msecs_to_ticks(SCTP_INP_KILL_TIMEOUT); break; - case SCTP_TIMER_TYPE_PATHMTURAISE: - /* - * Here we use the value found in the EP for PMTU ususually - * about 10 minutes. - */ - if ((stcb == NULL) || (net == NULL)) { + case SCTP_TIMER_TYPE_ASOCKILL: + if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { +#ifdef INVARIANTS + panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", + t_type, inp, stcb, net); +#else return; - } - if (net->dest_state & SCTP_ADDR_NO_PMTUD) { - return; - } - to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU]; - tmr = &net->pmtu_timer; - break; - case SCTP_TIMER_TYPE_SHUTDOWNACK: - /* Here we use the RTO of the destination */ - if ((stcb == NULL) || (net == NULL)) { - return; - } - if (net->RTO == 0) { - to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); - } else { - to_ticks = MSEC_TO_TICKS(net->RTO); - } - tmr = &net->rxt_timer; - break; - case SCTP_TIMER_TYPE_SHUTDOWNGUARD: - /* - * Here we use the endpoints shutdown guard timer usually - * about 3 minutes. 
- */ - if (stcb == NULL) { - return; - } - if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) { - to_ticks = 5 * MSEC_TO_TICKS(stcb->asoc.maxrto); - } else { - to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN]; - } - tmr = &stcb->asoc.shut_guard_timer; - break; - case SCTP_TIMER_TYPE_STRRESET: - /* - * Here the timer comes from the stcb but its value is from - * the net's RTO. - */ - if ((stcb == NULL) || (net == NULL)) { - return; - } - if (net->RTO == 0) { - to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); - } else { - to_ticks = MSEC_TO_TICKS(net->RTO); +#endif } tmr = &stcb->asoc.strreset_timer; + to_ticks = sctp_msecs_to_ticks(SCTP_ASOC_KILL_TIMEOUT); break; - case SCTP_TIMER_TYPE_ASCONF: - /* - * Here the timer comes from the stcb but its value is from - * the net's RTO. - */ - if ((stcb == NULL) || (net == NULL)) { + case SCTP_TIMER_TYPE_ADDR_WQ: + if ((inp != NULL) || (stcb != NULL) || (net != NULL)) { +#ifdef INVARIANTS + panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", + t_type, inp, stcb, net); +#else return; +#endif } - if (net->RTO == 0) { - to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); - } else { - to_ticks = MSEC_TO_TICKS(net->RTO); - } - tmr = &stcb->asoc.asconf_timer; + /* Only 1 tick away :-) */ + tmr = &SCTP_BASE_INFO(addr_wq_timer); + to_ticks = SCTP_ADDRESS_TICK_DELAY; break; case SCTP_TIMER_TYPE_PRIM_DELETED: - if ((stcb == NULL) || (net != NULL)) { + if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { +#ifdef INVARIANTS + panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p", + t_type, inp, stcb, net); +#else return; +#endif } - to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); tmr = &stcb->asoc.delete_prim_timer; - break; - case SCTP_TIMER_TYPE_AUTOCLOSE: - if (stcb == NULL) { - return; - } - if (stcb->asoc.sctp_autoclose_ticks == 0) { - /* - * Really an error since stcb is NOT set to - * autoclose - */ - return; - } - to_ticks = stcb->asoc.sctp_autoclose_ticks; - tmr = &stcb->asoc.autoclose_timer; + to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto); break; default: - SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n", - __func__, t_type); - return; - break; - } - if ((to_ticks <= 0) || (tmr == NULL)) { - SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n", - __func__, t_type, to_ticks, (void *)tmr); +#ifdef INVARIANTS + panic("Unknown timer type %d", t_type); +#else return; +#endif } + KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type)); + KASSERT(to_ticks > 0, ("to_ticks == 0 for timer type %d", t_type)); if (SCTP_OS_TIMER_PENDING(&tmr->timer)) { /* - * we do NOT allow you to have it already running. if it is - * we leave the current one up unchanged + * We do NOT allow you to have it already running. If it is, + * we leave the current one up unchanged. */ + SCTPDBG(SCTP_DEBUG_TIMER2, + "Timer type %d already running: inp=%p, stcb=%p, net=%p.\n", + t_type, inp, stcb, net); return; } - /* At this point we can proceed */ + /* At this point we can proceed. 
*/ if (t_type == SCTP_TIMER_TYPE_SEND) { stcb->asoc.num_send_timers_up++; } @@ -2370,155 +2644,301 @@ sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, tmr->type = t_type; tmr->ep = (void *)inp; tmr->tcb = (void *)stcb; - tmr->net = (void *)net; + if (t_type == SCTP_TIMER_TYPE_STRRESET) { + tmr->net = NULL; + } else { + tmr->net = (void *)net; + } tmr->self = (void *)tmr; -#if defined(__FreeBSD__) && __FreeBSD_version >= 800000 +#if defined(__FreeBSD__) && !defined(__Userspace__) tmr->vnet = (void *)curvnet; #endif -#ifndef __Panda__ tmr->ticks = sctp_get_tick_count(); -#endif - (void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr); + if (SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr) == 0) { + SCTPDBG(SCTP_DEBUG_TIMER2, + "Timer type %d started: ticks=%u, inp=%p, stcb=%p, net=%p.\n", + t_type, to_ticks, inp, stcb, net); + /* + * If this is a newly scheduled callout, as opposed to a + * rescheduled one, increment relevant reference counts. + */ + if (tmr->ep != NULL) { + SCTP_INP_INCR_REF(inp); + } + if (tmr->tcb != NULL) { + atomic_add_int(&stcb->asoc.refcnt, 1); + } + if (tmr->net != NULL) { + atomic_add_int(&net->ref_count, 1); + } + } else { + /* + * This should not happen, since we checked for pending + * above. + */ + SCTPDBG(SCTP_DEBUG_TIMER2, + "Timer type %d restarted: ticks=%u, inp=%p, stcb=%p, net=%p.\n", + t_type, to_ticks, inp, stcb, net); + } return; } +/*- + * The following table shows which parameters must be provided + * when calling sctp_timer_stop(). For parameters not being + * provided, NULL must be used. + * + * |Name |inp |stcb|net | + * |-----------------------------|----|----|----| + * |SCTP_TIMER_TYPE_SEND |Yes |Yes |Yes | + * |SCTP_TIMER_TYPE_INIT |Yes |Yes |Yes | + * |SCTP_TIMER_TYPE_RECV |Yes |Yes |No | + * |SCTP_TIMER_TYPE_SHUTDOWN |Yes |Yes |Yes | + * |SCTP_TIMER_TYPE_HEARTBEAT |Yes |Yes |Yes | + * |SCTP_TIMER_TYPE_COOKIE |Yes |Yes |Yes | + * |SCTP_TIMER_TYPE_NEWCOOKIE |Yes |No |No | + * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes | + * |SCTP_TIMER_TYPE_SHUTDOWNACK |Yes |Yes |Yes | + * |SCTP_TIMER_TYPE_ASCONF |Yes |Yes |No | + * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No | + * |SCTP_TIMER_TYPE_AUTOCLOSE |Yes |Yes |No | + * |SCTP_TIMER_TYPE_STRRESET |Yes |Yes |No | + * |SCTP_TIMER_TYPE_INPKILL |Yes |No |No | + * |SCTP_TIMER_TYPE_ASOCKILL |Yes |Yes |No | + * |SCTP_TIMER_TYPE_ADDR_WQ |No |No |No | + * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No | + * + */ + void sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t from) { struct sctp_timer *tmr; - if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && - (inp == NULL)) - return; - - tmr = NULL; - if (stcb) { + KASSERT(stcb == NULL || stcb->sctp_ep == inp, + ("sctp_timer_stop of type %d: inp = %p, stcb->sctp_ep %p", + t_type, stcb, stcb->sctp_ep)); + if (stcb != NULL) { SCTP_TCB_LOCK_ASSERT(stcb); + } else if (inp != NULL) { + SCTP_INP_WLOCK_ASSERT(inp); + } else { + SCTP_WQ_ADDR_LOCK_ASSERT(); } + tmr = NULL; switch (t_type) { - case SCTP_TIMER_TYPE_ADDR_WQ: - tmr = &SCTP_BASE_INFO(addr_wq_timer); - break; case SCTP_TIMER_TYPE_SEND: - if ((stcb == NULL) || (net == NULL)) { + if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { +#ifdef INVARIANTS + panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", + t_type, inp, stcb, net); +#else return; +#endif } tmr = &net->rxt_timer; break; case SCTP_TIMER_TYPE_INIT: - if ((stcb == NULL) || (net == NULL)) { + if ((inp == NULL) || (stcb == NULL) 
|| (net == NULL)) { +#ifdef INVARIANTS + panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", + t_type, inp, stcb, net); +#else return; +#endif } tmr = &net->rxt_timer; break; case SCTP_TIMER_TYPE_RECV: - if (stcb == NULL) { + if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { +#ifdef INVARIANTS + panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", + t_type, inp, stcb, net); +#else return; +#endif } tmr = &stcb->asoc.dack_timer; break; case SCTP_TIMER_TYPE_SHUTDOWN: - if ((stcb == NULL) || (net == NULL)) { + if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { +#ifdef INVARIANTS + panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", + t_type, inp, stcb, net); +#else return; +#endif } tmr = &net->rxt_timer; break; case SCTP_TIMER_TYPE_HEARTBEAT: - if ((stcb == NULL) || (net == NULL)) { + if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { +#ifdef INVARIANTS + panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", + t_type, inp, stcb, net); +#else return; +#endif } tmr = &net->hb_timer; break; case SCTP_TIMER_TYPE_COOKIE: - if ((stcb == NULL) || (net == NULL)) { + if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { +#ifdef INVARIANTS + panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", + t_type, inp, stcb, net); +#else return; +#endif } tmr = &net->rxt_timer; break; case SCTP_TIMER_TYPE_NEWCOOKIE: - /* nothing needed but the endpoint here */ - tmr = &inp->sctp_ep.signature_change; - /* - * We re-use the newcookie timer for the INP kill timer. We - * must assure that we do not kill it by accident. - */ - break; - case SCTP_TIMER_TYPE_ASOCKILL: - /* - * Stop the asoc kill timer. - */ - if (stcb == NULL) { + if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { +#ifdef INVARIANTS + panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", + t_type, inp, stcb, net); +#else return; +#endif + } + tmr = &inp->sctp_ep.signature_change; + break; + case SCTP_TIMER_TYPE_PATHMTURAISE: + if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { +#ifdef INVARIANTS + panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", + t_type, inp, stcb, net); +#else + return; +#endif + } + tmr = &net->pmtu_timer; + break; + case SCTP_TIMER_TYPE_SHUTDOWNACK: + if ((inp == NULL) || (stcb == NULL) || (net == NULL)) { +#ifdef INVARIANTS + panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", + t_type, inp, stcb, net); +#else + return; +#endif + } + tmr = &net->rxt_timer; + break; + case SCTP_TIMER_TYPE_ASCONF: + if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { +#ifdef INVARIANTS + panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", + t_type, inp, stcb, net); +#else + return; +#endif + } + tmr = &stcb->asoc.asconf_timer; + break; + case SCTP_TIMER_TYPE_SHUTDOWNGUARD: + if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { +#ifdef INVARIANTS + panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", + t_type, inp, stcb, net); +#else + return; +#endif + } + tmr = &stcb->asoc.shut_guard_timer; + break; + case SCTP_TIMER_TYPE_AUTOCLOSE: + if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { +#ifdef INVARIANTS + panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", + t_type, inp, stcb, net); +#else + return; +#endif + } + tmr = &stcb->asoc.autoclose_timer; + break; + case SCTP_TIMER_TYPE_STRRESET: + if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { +#ifdef INVARIANTS + panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", + t_type, inp, 
stcb, net); +#else + return; +#endif } tmr = &stcb->asoc.strreset_timer; break; - case SCTP_TIMER_TYPE_INPKILL: /* * The inp is setup to die. We re-use the signature_chage * timer since that has stopped and we are in the GONE * state. */ + if ((inp == NULL) || (stcb != NULL) || (net != NULL)) { +#ifdef INVARIANTS + panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", + t_type, inp, stcb, net); +#else + return; +#endif + } tmr = &inp->sctp_ep.signature_change; break; - case SCTP_TIMER_TYPE_PATHMTURAISE: - if ((stcb == NULL) || (net == NULL)) { - return; - } - tmr = &net->pmtu_timer; - break; - case SCTP_TIMER_TYPE_SHUTDOWNACK: - if ((stcb == NULL) || (net == NULL)) { - return; - } - tmr = &net->rxt_timer; - break; - case SCTP_TIMER_TYPE_SHUTDOWNGUARD: - if (stcb == NULL) { - return; - } - tmr = &stcb->asoc.shut_guard_timer; - break; - case SCTP_TIMER_TYPE_STRRESET: - if (stcb == NULL) { + case SCTP_TIMER_TYPE_ASOCKILL: + if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { +#ifdef INVARIANTS + panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", + t_type, inp, stcb, net); +#else return; +#endif } tmr = &stcb->asoc.strreset_timer; break; - case SCTP_TIMER_TYPE_ASCONF: - if (stcb == NULL) { + case SCTP_TIMER_TYPE_ADDR_WQ: + if ((inp != NULL) || (stcb != NULL) || (net != NULL)) { +#ifdef INVARIANTS + panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", + t_type, inp, stcb, net); +#else return; +#endif } - tmr = &stcb->asoc.asconf_timer; + tmr = &SCTP_BASE_INFO(addr_wq_timer); break; case SCTP_TIMER_TYPE_PRIM_DELETED: - if (stcb == NULL) { + if ((inp == NULL) || (stcb == NULL) || (net != NULL)) { +#ifdef INVARIANTS + panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p", + t_type, inp, stcb, net); +#else return; +#endif } tmr = &stcb->asoc.delete_prim_timer; break; - case SCTP_TIMER_TYPE_AUTOCLOSE: - if (stcb == NULL) { - return; - } - tmr = &stcb->asoc.autoclose_timer; - break; default: - SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n", - __func__, t_type); - break; - } - if (tmr == NULL) { +#ifdef INVARIANTS + panic("Unknown timer type %d", t_type); +#else return; +#endif } - if ((tmr->type != t_type) && tmr->type) { + KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type)); + if ((tmr->type != SCTP_TIMER_TYPE_NONE) && + (tmr->type != t_type)) { /* * Ok we have a timer that is under joint use. Cookie timer * per chance with the SEND timer. We therefore are NOT * running the timer that the caller wants stopped. So just * return. 
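 * For example, SCTP_TIMER_TYPE_SEND, SCTP_TIMER_TYPE_INIT,
 * SCTP_TIMER_TYPE_SHUTDOWN, SCTP_TIMER_TYPE_COOKIE and
 * SCTP_TIMER_TYPE_SHUTDOWNACK all share net->rxt_timer, and
 * SCTP_TIMER_TYPE_ASOCKILL reuses stcb->asoc.strreset_timer.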
*/ + SCTPDBG(SCTP_DEBUG_TIMER2, + "Shared timer type %d not running: inp=%p, stcb=%p, net=%p.\n", + t_type, inp, stcb, net); return; } if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) { @@ -2529,7 +2949,45 @@ sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb, } tmr->self = NULL; tmr->stopped_from = from; - (void)SCTP_OS_TIMER_STOP(&tmr->timer); + if (SCTP_OS_TIMER_STOP(&tmr->timer) == 1) { + KASSERT(tmr->ep == inp, + ("sctp_timer_stop of type %d: inp = %p, tmr->inp = %p", + t_type, inp, tmr->ep)); + KASSERT(tmr->tcb == stcb, + ("sctp_timer_stop of type %d: stcb = %p, tmr->stcb = %p", + t_type, stcb, tmr->tcb)); + KASSERT(((t_type == SCTP_TIMER_TYPE_ASCONF) && (tmr->net != NULL)) || + ((t_type != SCTP_TIMER_TYPE_ASCONF) && (tmr->net == net)), + ("sctp_timer_stop of type %d: net = %p, tmr->net = %p", + t_type, net, tmr->net)); + SCTPDBG(SCTP_DEBUG_TIMER2, + "Timer type %d stopped: inp=%p, stcb=%p, net=%p.\n", + t_type, inp, stcb, net); + /* + * If the timer was actually stopped, decrement reference counts + * that were incremented in sctp_timer_start(). + */ + if (tmr->ep != NULL) { + SCTP_INP_DECR_REF(inp); + tmr->ep = NULL; + } + if (tmr->tcb != NULL) { + atomic_add_int(&stcb->asoc.refcnt, -1); + tmr->tcb = NULL; + } + if (tmr->net != NULL) { + /* + * Can't use net, since it doesn't work for + * SCTP_TIMER_TYPE_ASCONF. + */ + sctp_free_remote_addr((struct sctp_nets *)tmr->net); + tmr->net = NULL; + } + } else { + SCTPDBG(SCTP_DEBUG_TIMER2, + "Timer type %d not stopped: inp=%p, stcb=%p, net=%p.\n", + t_type, inp, stcb, net); + } return; } @@ -2578,7 +3036,6 @@ sctp_mtu_size_reset(struct sctp_inpcb *inp, } } - /* * Given an association and starting time of the current RTT period, update * RTO in number of msecs. net should point to the current network. @@ -2609,7 +3066,7 @@ sctp_calculate_rto(struct sctp_tcb *stcb, (void)SCTP_GETTIME_TIMEVAL(&now); } if ((old->tv_sec > now.tv_sec) || - ((old->tv_sec == now.tv_sec) && (old->tv_sec > now.tv_sec))) { + ((old->tv_sec == now.tv_sec) && (old->tv_usec > now.tv_usec))) { /* The starting point is in the future. 
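 * For instance (hypothetical values), old = {.tv_sec = 5, .tv_usec = 700000}
 * and now = {.tv_sec = 5, .tv_usec = 300000}: the seconds are equal and
 * old's tv_usec is larger, so the start time is still in the future and
 * the function returns 0.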
*/ return (0); } @@ -2740,8 +3197,6 @@ sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr) } } - - struct sctp_paramhdr * sctp_get_next_param(struct mbuf *m, int offset, @@ -2753,7 +3208,6 @@ sctp_get_next_param(struct mbuf *m, (uint8_t *) pull)); } - struct mbuf * sctp_add_pad_tombuf(struct mbuf *m, int padlen) { @@ -2805,11 +3259,8 @@ sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf) static void sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb, - uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked -#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) - SCTP_UNUSED -#endif - ) + uint16_t error, struct sctp_abort_chunk *abort, + bool from_peer, bool timedout, int so_locked) { struct mbuf *m_notify; struct sctp_assoc_change *sac; @@ -2817,10 +3268,14 @@ sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb, unsigned int notif_len; uint16_t abort_len; unsigned int i; -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) struct socket *so; #endif + KASSERT(abort == NULL || from_peer, + ("sctp_notify_assoc_change: ABORT chunk provided for local termination")); + KASSERT(!from_peer || !timedout, + ("sctp_notify_assoc_change: timeouts can only be local")); if (stcb == NULL) { return; } @@ -2860,9 +3315,13 @@ sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb, sac->sac_length = sizeof(struct sctp_assoc_change); sac->sac_state = state; sac->sac_error = error; - /* XXX verify these stream counts */ - sac->sac_outbound_streams = stcb->asoc.streamoutcnt; - sac->sac_inbound_streams = stcb->asoc.streamincnt; + if (state == SCTP_CANT_STR_ASSOC) { + sac->sac_outbound_streams = 0; + sac->sac_inbound_streams = 0; + } else { + sac->sac_outbound_streams = stcb->asoc.streamoutcnt; + sac->sac_inbound_streams = stcb->asoc.streamincnt; + } sac->sac_assoc_id = sctp_get_associd(stcb); if (notif_len > sizeof(struct sctp_assoc_change)) { if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) { @@ -2924,8 +3383,7 @@ set_error: stcb->sctp_socket->so_error = ECONNRESET; } } else { - if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || - (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { + if (timedout) { SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT); stcb->sctp_socket->so_error = ETIMEDOUT; } else { @@ -2936,7 +3394,7 @@ set_error: SOCK_UNLOCK(stcb->sctp_socket); } /* Wake ANY sleepers */ -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) so = SCTP_INP_SO(stcb->sctp_ep); if (!so_locked) { atomic_add_int(&stcb->asoc.refcnt, 1); @@ -2957,7 +3415,7 @@ set_error: } sorwakeup(stcb->sctp_socket); sowwakeup(stcb->sctp_socket); -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) if (!so_locked) { SCTP_SOCKET_UNLOCK(so, 1); } @@ -2966,11 +3424,7 @@ set_error: static void sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state, - struct sockaddr *sa, uint32_t error, int so_locked -#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) - SCTP_UNUSED -#endif -) + struct sockaddr *sa, uint32_t error, int so_locked) { struct mbuf *m_notify; struct sctp_paddr_change *spc; @@ -3069,14 +3523,9 @@ sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state, so_locked); } - static void sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error, - struct sctp_tmit_chunk *chk, int so_locked -#if 
!defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) - SCTP_UNUSED -#endif - ) + struct sctp_tmit_chunk *chk, int so_locked) { struct mbuf *m_notify; struct sctp_send_failed *ssf; @@ -3205,14 +3654,9 @@ sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error, so_locked); } - static void sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error, - struct sctp_stream_queue_pending *sp, int so_locked -#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) - SCTP_UNUSED -#endif - ) + struct sctp_stream_queue_pending *sp, int so_locked) { struct mbuf *m_notify; struct sctp_send_failed *ssf; @@ -3306,8 +3750,6 @@ sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error, &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); } - - static void sctp_notify_adaptation_layer(struct sctp_tcb *stcb) { @@ -3358,11 +3800,7 @@ sctp_notify_adaptation_layer(struct sctp_tcb *stcb) /* This always must be called with the read-queue LOCKED in the INP */ static void sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error, - uint32_t val, int so_locked -#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) - SCTP_UNUSED -#endif - ) + uint32_t val, int so_locked) { struct mbuf *m_notify; struct sctp_pdapi_event *pdapi; @@ -3424,7 +3862,7 @@ sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error, } if (stcb->sctp_ep && stcb->sctp_socket) { /* This should always be the case */ -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) struct socket *so; so = SCTP_INP_SO(stcb->sctp_ep); @@ -3441,7 +3879,7 @@ sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error, } #endif sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) if (!so_locked) { SCTP_SOCKET_UNLOCK(so, 1); } @@ -3463,7 +3901,7 @@ sctp_notify_shutdown_event(struct sctp_tcb *stcb) if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { /* mark socket closed for read/write and wakeup! 
*/ -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) struct socket *so; so = SCTP_INP_SO(stcb->sctp_ep); @@ -3478,7 +3916,7 @@ sctp_notify_shutdown_event(struct sctp_tcb *stcb) } #endif socantsendmore(stcb->sctp_socket); -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_SOCKET_UNLOCK(so, 1); #endif } @@ -3521,11 +3959,7 @@ sctp_notify_shutdown_event(struct sctp_tcb *stcb) static void sctp_notify_sender_dry_event(struct sctp_tcb *stcb, - int so_locked -#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) - SCTP_UNUSED -#endif - ) + int so_locked) { struct mbuf *m_notify; struct sctp_sender_dry_event *event; @@ -3570,7 +4004,6 @@ sctp_notify_sender_dry_event(struct sctp_tcb *stcb, &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked); } - void sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag) { @@ -3677,8 +4110,6 @@ sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); } - - static void sctp_notify_stream_reset(struct sctp_tcb *stcb, int number_entries, uint16_t * list, int flag) @@ -3743,7 +4174,6 @@ sctp_notify_stream_reset(struct sctp_tcb *stcb, &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); } - static void sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk) { @@ -3803,20 +4233,15 @@ sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_erro sctp_add_to_readq(stcb->sctp_ep, stcb, control, &stcb->sctp_socket->so_rcv, 1, - SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); + SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); } else { sctp_m_freem(m_notify); } } - void sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb, - uint32_t error, void *data, int so_locked -#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) - SCTP_UNUSED -#endif - ) + uint32_t error, void *data, int so_locked) { if ((stcb == NULL) || (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || @@ -3825,14 +4250,14 @@ sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb, /* If the socket is gone we are out of here */ return; } -#if (defined(__FreeBSD__) && __FreeBSD_version > 500000) || defined(__Windows__) +#if (defined(__FreeBSD__) || defined(_WIN32)) && !defined(__Userspace__) if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) { #else if (stcb->sctp_socket->so_state & SS_CANTRCVMORE) { #endif return; } -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) if (so_locked) { sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep)); } else { @@ -3851,7 +4276,7 @@ sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb, switch (notification) { case SCTP_NOTIFY_ASSOC_UP: if (stcb->asoc.assoc_up_sent == 0) { - sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked); + sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, false, false, so_locked); stcb->asoc.assoc_up_sent = 1; } if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) { @@ -3863,7 +4288,7 @@ sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb, } break; case SCTP_NOTIFY_ASSOC_DOWN: - sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked); + sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, false, false, so_locked); #if defined(__Userspace__) if 
(stcb->sctp_ep->recv_callback) { if (stcb->sctp_socket) { @@ -3931,21 +4356,29 @@ sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb, case SCTP_NOTIFY_ASSOC_LOC_ABORTED: if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { - sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked); + sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, false, false, so_locked); } else { - sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked); + sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, false, false, so_locked); } break; case SCTP_NOTIFY_ASSOC_REM_ABORTED: if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { - sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked); + sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, true, false, so_locked); } else { - sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked); + sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, true, false, so_locked); + } + break; + case SCTP_NOTIFY_ASSOC_TIMEDOUT: + if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || + (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { + sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, false, true, so_locked); + } else { + sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, false, true, so_locked); } break; case SCTP_NOTIFY_ASSOC_RESTART: - sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked); + sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, false, false, so_locked); if (stcb->asoc.auth_supported == 0) { sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, NULL, so_locked); @@ -4017,11 +4450,7 @@ sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb, } void -sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked -#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) - SCTP_UNUSED -#endif - ) +sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int so_locked) { struct sctp_association *asoc; struct sctp_stream_out *outs; @@ -4037,7 +4466,7 @@ sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, /* already being freed */ return; } -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) if (so_locked) { sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep)); } else { @@ -4050,9 +4479,6 @@ sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, return; } /* now through all the gunk freeing chunks */ - if (holds_lock == 0) { - SCTP_TCB_SEND_LOCK(stcb); - } /* sent queue SHOULD be empty */ TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) { TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); @@ -4129,24 +4555,17 @@ sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, /*sa_ignore FREED_MEMORY*/ } } - - if (holds_lock == 0) { - SCTP_TCB_SEND_UNLOCK(stcb); - } } void -sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error, - struct sctp_abort_chunk *abort, int so_locked -#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) - SCTP_UNUSED -#endif - ) +sctp_abort_notification(struct sctp_tcb *stcb, bool from_peer, bool timeout, + uint16_t error, struct sctp_abort_chunk *abort, + int so_locked) { if (stcb == NULL) { return; } -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) if (so_locked) { 
sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep)); } else { @@ -4163,12 +4582,19 @@ sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { return; } + SCTP_TCB_SEND_LOCK(stcb); + SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED); /* Tell them we lost the asoc */ - sctp_report_all_outbound(stcb, error, 0, so_locked); + sctp_report_all_outbound(stcb, error, so_locked); + SCTP_TCB_SEND_UNLOCK(stcb); if (from_peer) { sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked); } else { - sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked); + if (timeout) { + sctp_ulp_notify(SCTP_NOTIFY_ASSOC_TIMEDOUT, stcb, error, abort, so_locked); + } else { + sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked); + } } } @@ -4177,32 +4603,41 @@ sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct mbuf *m, int iphlen, struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh, struct mbuf *op_err, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) uint8_t mflowtype, uint32_t mflowid, #endif uint32_t vrf_id, uint16_t port) { - uint32_t vtag; -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) struct socket *so; #endif + struct sctp_gen_error_cause* cause; + uint32_t vtag; + uint16_t cause_code; - vtag = 0; if (stcb != NULL) { vtag = stcb->asoc.peer_vtag; vrf_id = stcb->asoc.vrf_id; + if (op_err != NULL) { + /* Read the cause code from the error cause. */ + cause = mtod(op_err, struct sctp_gen_error_cause *); + cause_code = ntohs(cause->code); + } else { + cause_code = 0; + } + } else { + vtag = 0; } sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) mflowtype, mflowid, inp->fibnum, #endif vrf_id, port); if (stcb != NULL) { /* We have a TCB to abort, send notification too */ - sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED); - SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED); + sctp_abort_notification(stcb, false, false, cause_code, NULL, SCTP_SO_NOT_LOCKED); /* Ok, now lets free it */ -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) so = SCTP_INP_SO(inp); atomic_add_int(&stcb->asoc.refcnt, 1); SCTP_TCB_UNLOCK(stcb); @@ -4217,7 +4652,7 @@ sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, } (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4); -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_SOCKET_UNLOCK(so, 1); #endif } @@ -4286,21 +4721,18 @@ sctp_print_out_track_log(struct sctp_tcb *stcb) void sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, - struct mbuf *op_err, - int so_locked -#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) - SCTP_UNUSED -#endif -) + struct mbuf *op_err, bool timedout, int so_locked) { -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) struct socket *so; #endif + struct sctp_gen_error_cause* cause; + uint16_t cause_code; -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) so = SCTP_INP_SO(inp); #endif -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) if (so_locked) { sctp_lock_assert(SCTP_INP_SO(inp)); } else 
{ @@ -4311,14 +4743,14 @@ sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, /* Got to have a TCB */ if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { if (LIST_EMPTY(&inp->sctp_asoc_list)) { -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) if (!so_locked) { SCTP_SOCKET_LOCK(so, 1); } #endif sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, SCTP_CALLED_DIRECTLY_NOCMPSET); -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) if (!so_locked) { SCTP_SOCKET_UNLOCK(so, 1); } @@ -4326,8 +4758,13 @@ sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, } } return; + } + if (op_err != NULL) { + /* Read the cause code from the error cause. */ + cause = mtod(op_err, struct sctp_gen_error_cause *); + cause_code = ntohs(cause->code); } else { - SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED); + cause_code = 0; } /* notify the peer */ sctp_send_abort_tcb(stcb, op_err, so_locked); @@ -4338,13 +4775,13 @@ sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, } /* notify the ulp */ if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { - sctp_abort_notification(stcb, 0, 0, NULL, so_locked); + sctp_abort_notification(stcb, false, timedout, cause_code, NULL, so_locked); } /* now free the asoc */ #ifdef SCTP_ASOCLOG_OF_TSNS sctp_print_out_track_log(stcb); #endif -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) if (!so_locked) { atomic_add_int(&stcb->asoc.refcnt, 1); SCTP_TCB_UNLOCK(stcb); @@ -4355,7 +4792,7 @@ sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, #endif (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5); -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) if (!so_locked) { SCTP_SOCKET_UNLOCK(so, 1); } @@ -4367,7 +4804,7 @@ sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh, struct sctp_inpcb *inp, struct mbuf *cause, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum, #endif uint32_t vrf_id, uint16_t port) @@ -4380,12 +4817,12 @@ sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, /* Generate a TO address for future reference */ if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { if (LIST_EMPTY(&inp->sctp_asoc_list)) { -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_SOCKET_LOCK(SCTP_INP_SO(inp), 1); #endif sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, SCTP_CALLED_DIRECTLY_NOCMPSET); -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 1); #endif } @@ -4417,7 +4854,7 @@ sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, return; case SCTP_SHUTDOWN_ACK: sctp_send_shutdown_complete2(src, dst, sh, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) mflowtype, mflowid, fibnum, #endif vrf_id, port); @@ -4433,7 +4870,7 @@ sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) && (contains_init_chunk == 0))) { sctp_send_abort(m, iphlen, src, dst, sh, 0, cause, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) mflowtype, mflowid, fibnum, #endif vrf_id, port); @@ -4445,7 +4882,7 @@ sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, * if there is return 1, else return 
0. */ int -sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill) +sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtag) { struct sctp_chunkhdr *ch; struct sctp_init_chunk *init_chk, chunk_buf; @@ -4466,12 +4903,13 @@ sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill) /* yep, tell them */ return (1); } - if (ch->chunk_type == SCTP_INITIATION) { + if ((ch->chunk_type == SCTP_INITIATION) || + (ch->chunk_type == SCTP_INITIATION_ACK)) { /* need to update the Vtag */ init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m, - offset, sizeof(*init_chk), (uint8_t *) & chunk_buf); + offset, sizeof(struct sctp_init_chunk), (uint8_t *) & chunk_buf); if (init_chk != NULL) { - *vtagfill = ntohl(init_chk->init.initiate_tag); + *vtag = ntohl(init_chk->init.initiate_tag); } } /* Nope, move to the next chunk */ @@ -4620,7 +5058,7 @@ void sctp_print_address(struct sockaddr *sa) { #ifdef INET6 -#if defined(__FreeBSD__) && __FreeBSD_version >= 700000 +#if defined(__FreeBSD__) && !defined(__Userspace__) char ip6buf[INET6_ADDRSTRLEN]; #endif #endif @@ -4645,7 +5083,7 @@ sctp_print_address(struct sockaddr *sa) ntohs(sin6->sin6_port), sin6->sin6_scope_id); #else -#if defined(__FreeBSD__) && __FreeBSD_version >= 700000 +#if defined(__FreeBSD__) && !defined(__Userspace__) SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n", ip6_sprintf(ip6buf, &sin6->sin6_addr), ntohs(sin6->sin6_port), @@ -4703,23 +5141,21 @@ sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp, struct sctp_queued_to_read *control, *nctl; struct sctp_readhead tmp_queue; struct mbuf *m; -#if defined(__FreeBSD__) || defined(__APPLE__) +#if (defined(__FreeBSD__) || defined(__APPLE__)) && !defined(__Userspace__) int error = 0; #endif old_so = old_inp->sctp_socket; new_so = new_inp->sctp_socket; TAILQ_INIT(&tmp_queue); -#if defined(__FreeBSD__) || defined(__APPLE__) -#if defined(__FreeBSD__) && __FreeBSD_version < 700000 - SOCKBUF_LOCK(&(old_so->so_rcv)); -#endif +#if (defined(__FreeBSD__) || defined(__APPLE__)) && !defined(__Userspace__) +#if defined(__FreeBSD__) + error = SOCK_IO_RECV_LOCK(old_so, waitflags); +#else error = sblock(&old_so->so_rcv, waitflags); -#if defined(__FreeBSD__) && __FreeBSD_version < 700000 - SOCKBUF_UNLOCK(&(old_so->so_rcv)); #endif if (error) { - /* Gak, can't get sblock, we have a problem. + /* Gak, can't get I/O lock, we have a problem. * data will be left stranded.. and we * don't dare look at it since the * other thread may be reading something. 
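/*
 * Illustrative sketch (not from the upstream usrsctp sources): the hunk
 * above extends sctp_is_there_an_abort_here() so that the verification
 * tag is pulled from INIT-ACK chunks as well as INIT chunks while the
 * packet is scanned for an ABORT.  The stand-alone model below mirrors
 * that chunk walk; struct chunk_hdr, struct init_chunk, the CHUNK_*
 * constants and has_abort() are simplified stand-ins invented for this
 * sketch, not the real usrsctp definitions.
 */
#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <arpa/inet.h>

#define CHUNK_INIT     1
#define CHUNK_INIT_ACK 2
#define CHUNK_ABORT    6

struct chunk_hdr  { uint8_t type; uint8_t flags; uint16_t length; };
struct init_chunk { struct chunk_hdr ch; uint32_t initiate_tag; };

/* Return 1 if an ABORT chunk is present; fill *vtag from INIT/INIT-ACK. */
static int has_abort(const uint8_t *chunks, size_t len, uint32_t *vtag)
{
	size_t offset = 0;

	while (offset + sizeof(struct chunk_hdr) <= len) {
		struct chunk_hdr ch;
		uint16_t clen;

		memcpy(&ch, chunks + offset, sizeof(ch));
		clen = ntohs(ch.length);
		if (clen < sizeof(ch)) {
			break;			/* malformed chunk, stop scanning */
		}
		if (ch.type == CHUNK_ABORT) {
			return (1);
		}
		if (((ch.type == CHUNK_INIT) || (ch.type == CHUNK_INIT_ACK)) &&
		    (offset + sizeof(struct init_chunk) <= len)) {
			struct init_chunk ic;

			memcpy(&ic, chunks + offset, sizeof(ic));
			*vtag = ntohl(ic.initiate_tag);
		}
		offset += (clen + 3u) & ~(size_t)3;	/* chunks are 4-byte padded */
	}
	return (0);
}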
@@ -4754,19 +5190,12 @@ sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp, } } SCTP_INP_READ_UNLOCK(old_inp); - /* Remove the sb-lock on the old socket */ -#if defined(__FreeBSD__) && __FreeBSD_version < 700000 - SOCKBUF_LOCK(&(old_so->so_rcv)); -#endif -#if defined(__APPLE__) + /* Remove the recv-lock on the old socket */ +#if defined(__APPLE__) && !defined(__Userspace__) sbunlock(&old_so->so_rcv, 1); #endif - -#if defined(__FreeBSD__) - sbunlock(&old_so->so_rcv); -#endif -#if defined(__FreeBSD__) && __FreeBSD_version < 700000 - SOCKBUF_UNLOCK(&(old_so->so_rcv)); +#if defined(__FreeBSD__) && !defined(__Userspace__) + SOCK_IO_RECV_UNLOCK(old_so); #endif /* Now we move them over to the new socket buffer */ SCTP_INP_READ_LOCK(new_inp); @@ -4791,13 +5220,13 @@ void sctp_wakeup_the_read_socket(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked -#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) +#if !(defined(__APPLE__) && !defined(__Userspace__)) SCTP_UNUSED #endif ) { if ((inp != NULL) && (inp->sctp_socket != NULL)) { -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) struct socket *so; so = SCTP_INP_SO(inp); @@ -4818,7 +5247,7 @@ sctp_wakeup_the_read_socket(struct sctp_inpcb *inp, } #endif sctp_sorwakeup(inp, inp->sctp_socket); -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) if (!so_locked) { SCTP_SOCKET_UNLOCK(so, 1); } @@ -4932,11 +5361,7 @@ sctp_add_to_readq(struct sctp_inpcb *inp, struct sockbuf *sb, int end, int inp_read_lock_held, - int so_locked -#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) - SCTP_UNUSED -#endif - ) + int so_locked) { /* * Here we must place the control on the end of the socket read @@ -4952,7 +5377,7 @@ sctp_add_to_readq(struct sctp_inpcb *inp, #endif return; } -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) if (so_locked) { sctp_lock_assert(SCTP_INP_SO(inp)); } else { @@ -5119,7 +5544,6 @@ sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc, stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size; } else { stcb->sctp_socket->so_snd.sb_cc = 0; - } } } @@ -5128,11 +5552,7 @@ sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc, int sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1, - uint8_t sent, int so_locked -#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) - SCTP_UNUSED -#endif - ) + uint8_t sent, int so_locked) { struct sctp_stream_out *strq; struct sctp_tmit_chunk *chk = NULL, *tp2; @@ -5144,7 +5564,7 @@ sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1, int notdone; int do_wakeup_routine = 0; -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) if (so_locked) { sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep)); } else { @@ -5301,7 +5721,7 @@ sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1, chk->rec.data.context = sp->context; chk->flags = sp->act_flags; chk->whoTo = NULL; -#if defined(__FreeBSD__) || defined(__Panda__) +#if defined(__FreeBSD__) && !defined(__Userspace__) chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1); #else chk->rec.data.tsn = stcb->asoc.sending_seq++; @@ -5345,7 +5765,7 @@ sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1, SCTP_TCB_SEND_UNLOCK(stcb); } if (do_wakeup_routine) { -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && 
!defined(__Userspace__) struct socket *so; so = SCTP_INP_SO(stcb->sctp_ep); @@ -5363,7 +5783,7 @@ sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1, } #endif sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket); -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) && !defined(__Userspace__) if (!so_locked) { SCTP_SOCKET_UNLOCK(so, 1); } @@ -5398,10 +5818,6 @@ sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr, if (((struct sockaddr_in *)addr)->sin_addr.s_addr == laddr->ifa->address.sin.sin_addr.s_addr) { /* found him. */ - if (holds_lock == 0) { - SCTP_INP_RUNLOCK(inp); - } - return (laddr->ifa); break; } } @@ -5411,10 +5827,6 @@ sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr, if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, &laddr->ifa->address.sin6)) { /* found him. */ - if (holds_lock == 0) { - SCTP_INP_RUNLOCK(inp); - } - return (laddr->ifa); break; } } @@ -5423,10 +5835,6 @@ sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr, if (addr->sa_family == AF_CONN) { if (((struct sockaddr_conn *)addr)->sconn_addr == laddr->ifa->address.sconn.sconn_addr) { /* found him. */ - if (holds_lock == 0) { - SCTP_INP_RUNLOCK(inp); - } - return (laddr->ifa); break; } } @@ -5435,7 +5843,11 @@ sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr, if (holds_lock == 0) { SCTP_INP_RUNLOCK(inp); } - return (NULL); + if (laddr != NULL) { + return (laddr->ifa); + } else { + return (NULL); + } } uint32_t @@ -5458,7 +5870,7 @@ sctp_get_ifa_hash_val(struct sockaddr *addr) uint32_t hash_of_addr; sin6 = (struct sockaddr_in6 *)addr; -#if !defined(__Windows__) && !defined(__Userspace_os_FreeBSD) && !defined(__Userspace_os_Darwin) && !defined(__Userspace_os_Windows) +#if !defined(_WIN32) && !(defined(__FreeBSD__) && defined(__Userspace__)) && !defined(__APPLE__) hash_of_addr = (sin6->sin6_addr.s6_addr32[0] + sin6->sin6_addr.s6_addr32[1] + sin6->sin6_addr.s6_addr32[2] + @@ -5498,8 +5910,11 @@ sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock) struct sctp_ifalist *hash_head; uint32_t hash_of_addr; - if (holds_lock == 0) + if (holds_lock == 0) { SCTP_IPI_ADDR_RLOCK(); + } else { + SCTP_IPI_ADDR_LOCK_ASSERT(); + } vrf = sctp_find_vrf(vrf_id); if (vrf == NULL) { @@ -5530,9 +5945,6 @@ sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock) if (((struct sockaddr_in *)addr)->sin_addr.s_addr == sctp_ifap->address.sin.sin_addr.s_addr) { /* found him. */ - if (holds_lock == 0) - SCTP_IPI_ADDR_RUNLOCK(); - return (sctp_ifap); break; } } @@ -5542,9 +5954,6 @@ sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock) if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr, &sctp_ifap->address.sin6)) { /* found him. */ - if (holds_lock == 0) - SCTP_IPI_ADDR_RUNLOCK(); - return (sctp_ifap); break; } } @@ -5553,9 +5962,6 @@ sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock) if (addr->sa_family == AF_CONN) { if (((struct sockaddr_conn *)addr)->sconn_addr == sctp_ifap->address.sconn.sconn_addr) { /* found him. 
*/ - if (holds_lock == 0) - SCTP_IPI_ADDR_RUNLOCK(); - return (sctp_ifap); break; } } @@ -5563,7 +5969,7 @@ sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock) } if (holds_lock == 0) SCTP_IPI_ADDR_RUNLOCK(); - return (NULL); + return (sctp_ifap); } static void @@ -5571,6 +5977,9 @@ sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock, uint32_t rwnd_req) { /* User pulled some data, do we need a rwnd update? */ +#if defined(__FreeBSD__) && !defined(__Userspace__) + struct epoch_tracker et; +#endif int r_unlocked = 0; uint32_t dif, rwnd; struct socket *so = NULL; @@ -5581,7 +5990,7 @@ sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock, atomic_add_int(&stcb->asoc.refcnt, 1); if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) || - (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | SCTP_STATE_SHUTDOWN_RECEIVED))) { + (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | SCTP_STATE_SHUTDOWN_RECEIVED))) { /* Pre-check If we are freeing no update */ goto no_lock; } @@ -5626,11 +6035,17 @@ sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock, goto out; } SCTP_STAT_INCR(sctps_wu_sacks_sent); +#if defined(__FreeBSD__) && !defined(__Userspace__) + NET_EPOCH_ENTER(et); +#endif sctp_send_sack(stcb, SCTP_SO_LOCKED); sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED); /* make sure no timer is running */ +#if defined(__FreeBSD__) && !defined(__Userspace__) + NET_EPOCH_EXIT(et); +#endif sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6); SCTP_TCB_UNLOCK(stcb); @@ -5687,7 +6102,7 @@ sctp_sorecvmsg(struct socket *so, int hold_rlock = 0; ssize_t slen = 0; uint32_t held_length = 0; -#if defined(__FreeBSD__) && __FreeBSD_version >= 700000 +#if defined(__FreeBSD__) && !defined(__Userspace__) int sockbuf_lock = 0; #endif @@ -5703,7 +6118,7 @@ sctp_sorecvmsg(struct socket *so, } else { in_flags = 0; } -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) #if defined(APPLE_LEOPARD) slen = uio->uio_resid; #else @@ -5723,7 +6138,7 @@ sctp_sorecvmsg(struct socket *so, return (EINVAL); } if ((in_flags & (MSG_DONTWAIT -#if defined(__FreeBSD__) && __FreeBSD_version > 500000 +#if defined(__FreeBSD__) && !defined(__Userspace__) | MSG_NBIO #endif )) || @@ -5742,7 +6157,7 @@ sctp_sorecvmsg(struct socket *so, rwnd_req = SCTP_MIN_RWND; in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) #if defined(APPLE_LEOPARD) sctp_misc_ints(SCTP_SORECV_ENTER, rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid); @@ -5755,12 +6170,12 @@ sctp_sorecvmsg(struct socket *so, rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid); #endif } -#if (defined(__FreeBSD__) && __FreeBSD_version < 700000) || defined(__Userspace__) +#if defined(__Userspace__) SOCKBUF_LOCK(&so->so_rcv); hold_sblock = 1; #endif if (SCTP_BASE_SYSCTL(sctp_logging_level) &SCTP_RECV_RWND_LOGGING_ENABLE) { -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) #if defined(APPLE_LEOPARD) sctp_misc_ints(SCTP_SORECV_ENTERPL, rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid); @@ -5774,34 +6189,29 @@ sctp_sorecvmsg(struct socket *so, #endif } -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) error = sblock(&so->so_rcv, SBLOCKWAIT(in_flags)); #endif 
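/*
 * Illustrative sketch (not from the upstream usrsctp sources): the hunks
 * around this point replace the old sblock()/sbunlock() calls with the
 * newer FreeBSD SOCK_IO_RECV_LOCK()/SOCK_IO_RECV_UNLOCK() helpers.  Both
 * enforce the same discipline: at most one thread at a time may drain the
 * socket receive buffer, optionally sleeping until the lock is free.  The
 * pthread model below shows that discipline in isolation; struct recv_buf,
 * recv_io_lock() and recv_io_unlock() are names invented for this sketch.
 */
#include <pthread.h>
#include <stdbool.h>

struct recv_buf {
	pthread_mutex_t lock;		/* protects io_locked */
	pthread_cond_t  cv;		/* signalled when the I/O lock is released */
	bool            io_locked;	/* a receiver currently owns the buffer */
};

/* Acquire the receive I/O lock; fail instead of sleeping if wait is false. */
static int recv_io_lock(struct recv_buf *rb, bool wait)
{
	pthread_mutex_lock(&rb->lock);
	while (rb->io_locked) {
		if (!wait) {
			pthread_mutex_unlock(&rb->lock);
			return (-1);	/* comparable to EWOULDBLOCK */
		}
		pthread_cond_wait(&rb->cv, &rb->lock);
	}
	rb->io_locked = true;		/* we are now the only receiver */
	pthread_mutex_unlock(&rb->lock);
	return (0);
}

static void recv_io_unlock(struct recv_buf *rb)
{
	pthread_mutex_lock(&rb->lock);
	rb->io_locked = false;
	pthread_cond_broadcast(&rb->cv);
	pthread_mutex_unlock(&rb->lock);
}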
- -#if defined(__FreeBSD__) - error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0)); +#if defined(__FreeBSD__) && !defined(__Userspace__) + error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(in_flags)); #endif if (error) { goto release_unlocked; } -#if defined(__FreeBSD__) && __FreeBSD_version >= 700000 - sockbuf_lock = 1; +#if defined(__FreeBSD__) && !defined(__Userspace__) + sockbuf_lock = 1; #endif restart: -#if (defined(__FreeBSD__) && __FreeBSD_version < 700000) || defined(__Userspace__) +#if defined(__Userspace__) if (hold_sblock == 0) { SOCKBUF_LOCK(&so->so_rcv); hold_sblock = 1; } #endif -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) sbunlock(&so->so_rcv, 1); #endif -#if defined(__FreeBSD__) && __FreeBSD_version < 700000 - sbunlock(&so->so_rcv); -#endif - restart_nosblocks: if (hold_sblock == 0) { SOCKBUF_LOCK(&so->so_rcv); @@ -5811,7 +6221,7 @@ sctp_sorecvmsg(struct socket *so, (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { goto out; } -#if (defined(__FreeBSD__) && __FreeBSD_version > 500000) || defined(__Windows__) +#if (defined(__FreeBSD__) || defined(_WIN32)) && !defined(__Userspace__) if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) { #else if ((so->so_state & SS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) { @@ -5879,11 +6289,8 @@ sctp_sorecvmsg(struct socket *so, SOCKBUF_UNLOCK(&so->so_rcv); hold_sblock = 0; } -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) error = sblock(&so->so_rcv, SBLOCKWAIT(in_flags)); -#endif -#if defined(__FreeBSD__) && __FreeBSD_version < 700000 - error = sblock(&so->so_rcv, (block_allowed ? M_WAITOK : 0)); #endif /* we possibly have data we can read */ /*sa_ignore FREED_MEMORY*/ @@ -6236,7 +6643,7 @@ sctp_sorecvmsg(struct socket *so, m = control->data; while (m) { /* Move out all we can */ -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) #if defined(APPLE_LEOPARD) cp_len = uio->uio_resid; #else @@ -6254,12 +6661,12 @@ sctp_sorecvmsg(struct socket *so, SCTP_INP_READ_UNLOCK(inp); hold_rlock = 0; } -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_SOCKET_UNLOCK(so, 0); #endif if (cp_len > 0) error = uiomove(mtod(m, char *), (int)cp_len, uio); -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_SOCKET_LOCK(so, 0); #endif /* re-read */ @@ -6308,13 +6715,13 @@ sctp_sorecvmsg(struct socket *so, copied_so_far += cp_len; freed_so_far += (uint32_t)cp_len; freed_so_far += MSIZE; - atomic_subtract_int(&control->length, cp_len); + atomic_subtract_int(&control->length, (int)cp_len); control->data = sctp_m_free(m); m = control->data; /* been through it all, must hold sb lock ok to null tail */ if (control->data == NULL) { #ifdef INVARIANTS -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) if ((control->end_added == 0) || (TAILQ_NEXT(control, next) == NULL)) { /* If the end is not added, OR the @@ -6345,10 +6752,10 @@ sctp_sorecvmsg(struct socket *so, if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { sctp_sblog(&so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE, (int)cp_len); } - atomic_subtract_int(&so->so_rcv.sb_cc, cp_len); + atomic_subtract_int(&so->so_rcv.sb_cc, (int)cp_len); if ((control->do_not_ref_stcb == 0) && stcb) { - atomic_subtract_int(&stcb->asoc.sb_cc, cp_len); + atomic_subtract_int(&stcb->asoc.sb_cc, (int)cp_len); } copied_so_far += cp_len; freed_so_far += (uint32_t)cp_len; @@ -6357,12 +6764,12 @@ sctp_sorecvmsg(struct socket *so, 
sctp_sblog(&so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0); } - atomic_subtract_int(&control->length, cp_len); + atomic_subtract_int(&control->length, (int)cp_len); } else { copied_so_far += cp_len; } } -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) #if defined(APPLE_LEOPARD) if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) { #else @@ -6448,7 +6855,7 @@ sctp_sorecvmsg(struct socket *so, if (out_flags & MSG_EOR) { goto release; } -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) #if defined(APPLE_LEOPARD) if ((uio->uio_resid == 0) || #else @@ -6470,10 +6877,11 @@ sctp_sorecvmsg(struct socket *so, goto release; } /* - * We need to wait for more data a few things: - We don't - * sbunlock() so we don't get someone else reading. - We - * must be sure to account for the case where what is added - * is NOT to our control when we wakeup. + * We need to wait for more data a few things: + * - We don't release the I/O lock so we don't get someone else + * reading. + * - We must be sure to account for the case where what is added + * is NOT to our control when we wakeup. */ /* Do we need to tell the transport a rwnd update might be @@ -6486,7 +6894,7 @@ sctp_sorecvmsg(struct socket *so, sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); } wait_some_more: -#if (defined(__FreeBSD__) && __FreeBSD_version > 500000) || defined(__Windows__) +#if (defined(__FreeBSD__) || defined(_WIN32)) && !defined(__Userspace__) if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { goto release; } @@ -6511,13 +6919,13 @@ sctp_sorecvmsg(struct socket *so, (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) { goto release; } -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) sbunlock(&so->so_rcv, 1); #endif if (so->so_rcv.sb_cc <= control->held_length) { error = sbwait(&so->so_rcv); if (error) { -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) goto release; #else goto release_unlocked; @@ -6525,7 +6933,7 @@ sctp_sorecvmsg(struct socket *so, } control->held_length = 0; } -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) error = sblock(&so->so_rcv, SBLOCKWAIT(in_flags)); #endif if (hold_sblock) { @@ -6597,7 +7005,7 @@ sctp_sorecvmsg(struct socket *so, if (control->spec_flags & M_NOTIFICATION) { out_flags |= MSG_NOTIFICATION; } -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) #if defined(APPLE_LEOPARD) uio->uio_resid = control->length; #else @@ -6634,7 +7042,7 @@ sctp_sorecvmsg(struct socket *so, SCTP_INP_READ_UNLOCK(inp); hold_rlock = 0; } -#if (defined(__FreeBSD__) && __FreeBSD_version < 700000) || defined(__Userspace__) +#if defined(__Userspace__) if (hold_sblock == 0) { SOCKBUF_LOCK(&so->so_rcv); hold_sblock = 1; @@ -6645,15 +7053,13 @@ sctp_sorecvmsg(struct socket *so, hold_sblock = 0; } #endif -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) sbunlock(&so->so_rcv, 1); #endif -#if defined(__FreeBSD__) - sbunlock(&so->so_rcv); -#if defined(__FreeBSD__) && __FreeBSD_version >= 700000 +#if defined(__FreeBSD__) && !defined(__Userspace__) + SOCK_IO_RECV_UNLOCK(so); sockbuf_lock = 0; -#endif #endif release_unlocked: @@ -6686,9 +7092,9 @@ sctp_sorecvmsg(struct socket *so, if (hold_sblock) { SOCKBUF_UNLOCK(&so->so_rcv); } -#if defined(__FreeBSD__) && __FreeBSD_version >= 700000 +#if defined(__FreeBSD__) && !defined(__Userspace__) if (sockbuf_lock) { - sbunlock(&so->so_rcv); + SOCK_IO_RECV_UNLOCK(so); } #endif @@ -6715,7 
+7121,7 @@ sctp_sorecvmsg(struct socket *so, if (stcb) { sctp_misc_ints(SCTP_SORECV_DONE, freed_so_far, -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) #if defined(APPLE_LEOPARD) ((uio) ? (slen - uio->uio_resid) : slen), #else @@ -6729,7 +7135,7 @@ sctp_sorecvmsg(struct socket *so, } else { sctp_misc_ints(SCTP_SORECV_DONE, freed_so_far, -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) #if defined(APPLE_LEOPARD) ((uio) ? (slen - uio->uio_resid) : slen), #else @@ -6749,7 +7155,6 @@ sctp_sorecvmsg(struct socket *so, return (error); } - #ifdef SCTP_MBUF_LOGGING struct mbuf * sctp_m_free(struct mbuf *m) @@ -6778,7 +7183,7 @@ sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id) struct sctp_ifa *ifa; struct sctp_laddr *wi; - ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0); + ifa = sctp_find_ifa_by_addr(sa, vrf_id, SCTP_ADDR_NOT_LOCKED); if (ifa == NULL) { SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL); return (EADDRNOTAVAIL); @@ -6817,7 +7222,6 @@ sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id) #if defined(__Userspace__) /* no sctp_soreceive for __Userspace__ now */ #endif - #if !defined(__Userspace__) int sctp_soreceive( struct socket *so, @@ -6859,7 +7263,7 @@ sctp_soreceive( struct socket *so, fromlen = 0; } -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_SOCKET_LOCK(so, 1); #endif if (filling_sinfo) { @@ -6891,7 +7295,7 @@ sctp_soreceive( struct socket *so, #else if (from) { #endif -#if (defined(__FreeBSD__) && __FreeBSD_version > 500000) || defined(__Windows__) +#if (defined(__FreeBSD__) || defined(_WIN32)) && !defined(__Userspace__) *psa = sodupsockaddr(from, M_NOWAIT); #else *psa = dup_sockaddr(from, mp0 == 0); @@ -6900,14 +7304,13 @@ sctp_soreceive( struct socket *so, *psa = NULL; } } -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) SCTP_SOCKET_UNLOCK(so, 1); #endif return (error); } - -#if (defined(__FreeBSD__) && __FreeBSD_version < 603000) || defined(__Windows__) +#if defined(_WIN32) && !defined(__Userspace__) /* * General routine to allocate a hash table with control of memory flags. * is in 7.0 and beyond for sure :-) @@ -6954,7 +7357,6 @@ sctp_hashinit_flags(int elements, struct malloc_type *type, return (hashtbl); } #endif - #else /* __Userspace__ ifdef above sctp_soreceive */ /* * __Userspace__ Defining sctp_hashinit_flags() and sctp_hashdestroy() for userland. 
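/*
 * Illustrative sketch (not from the upstream usrsctp sources):
 * sctp_hashinit_flags(), kept above for the kernel builds and
 * re-implemented for __Userspace__ below, follows the classic BSD
 * hashinit() scheme: round the requested element count down to a power
 * of two and hand back hashmask = size - 1 so lookups can index with a
 * bitwise AND instead of a modulo.  A minimal stand-alone model of that
 * sizing scheme, with hash_init() and struct bucket invented for this
 * sketch:
 */
#include <stdlib.h>

struct bucket { struct bucket *next; };

static struct bucket **hash_init(int elements, unsigned long *hashmask)
{
	unsigned long hashsize;
	struct bucket **hashtbl;

	if (elements <= 0) {
		return (NULL);
	}
	for (hashsize = 1; hashsize <= (unsigned long)elements; hashsize <<= 1) {
		continue;
	}
	hashsize >>= 1;			/* largest power of two <= elements */
	hashtbl = calloc(hashsize, sizeof(struct bucket *));
	if (hashtbl == NULL) {
		return (NULL);
	}
	*hashmask = hashsize - 1;	/* callers index with (hash & *hashmask) */
	return (hashtbl);
}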
@@ -7005,7 +7407,6 @@ sctp_hashinit_flags(int elements, struct malloc_type *type, return (hashtbl); } - void sctp_hashdestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask) { @@ -7020,7 +7421,6 @@ sctp_hashdestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask) FREE(hashtbl, type); } - void sctp_hashfreedestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask) { @@ -7045,10 +7445,7 @@ sctp_hashfreedestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask) FREE(hashtbl, type); } - #endif - - int sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr, int totaddr, int *error) @@ -7184,17 +7581,17 @@ sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr, { struct sockaddr_in6 *sin6; - sin6 = (struct sockaddr_in6 *)sa; - if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { - /* Must be non-mapped for connectx */ - return (EINVAL); - } incr = (unsigned int)sizeof(struct sockaddr_in6); #ifdef HAVE_SA_LEN if (sa->sa_len != incr) { return (EINVAL); } #endif + sin6 = (struct sockaddr_in6 *)sa; + if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { + /* Must be non-mapped for connectx */ + return (EINVAL); + } (*num_v6) += 1; break; } @@ -7225,16 +7622,24 @@ sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr, */ void sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp, - struct sockaddr *sa, sctp_assoc_t assoc_id, - uint32_t vrf_id, int *error, void *p) + struct sockaddr *sa, uint32_t vrf_id, int *error, + void *p) { - struct sockaddr *addr_touse; #if defined(INET) && defined(INET6) struct sockaddr_in sin; #endif -#ifdef SCTP_MVRF - int i, fnd = 0; +#ifdef INET6 + struct sockaddr_in6 *sin6; #endif +#ifdef INET + struct sockaddr_in *sinp; +#endif + struct sockaddr *addr_to_use; + struct sctp_inpcb *lep; +#ifdef SCTP_MVRF + int i; +#endif + uint16_t port; /* see if we're bound all already! 
*/ if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { @@ -7246,23 +7651,18 @@ sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp, /* Is the VRF one we have */ for (i = 0; i < inp->num_vrfs; i++) { if (vrf_id == inp->m_vrf_ids[i]) { - fnd = 1; break; } } - if (!fnd) { + if (i == inp->num_vrfs) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); *error = EINVAL; return; } #endif - addr_touse = sa; + switch (sa->sa_family) { #ifdef INET6 - if (sa->sa_family == AF_INET6) { -#ifdef INET - struct sockaddr_in6 *sin6; - -#endif + case AF_INET6: #ifdef HAVE_SA_LEN if (sa->sa_len != sizeof(struct sockaddr_in6)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); @@ -7276,8 +7676,9 @@ sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp, *error = EINVAL; return; } + sin6 = (struct sockaddr_in6 *)sa; + port = sin6->sin6_port; #ifdef INET - sin6 = (struct sockaddr_in6 *)addr_touse; if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && SCTP_IPV6_V6ONLY(inp)) { @@ -7287,13 +7688,17 @@ sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp, return; } in6_sin6_2_sin(&sin, sin6); - addr_touse = (struct sockaddr *)&sin; + addr_to_use = (struct sockaddr *)&sin; + } else { + addr_to_use = sa; } +#else + addr_to_use = sa; #endif - } + break; #endif #ifdef INET - if (sa->sa_family == AF_INET) { + case AF_INET: #ifdef HAVE_SA_LEN if (sa->sa_len != sizeof(struct sockaddr_in)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); @@ -7308,10 +7713,18 @@ sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp, *error = EINVAL; return; } - } + sinp = (struct sockaddr_in *)sa; + port = sinp->sin_port; + addr_to_use = sa; + break; #endif + default: + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); + *error = EINVAL; + return; + } if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { -#if !(defined(__Panda__) || defined(__Windows__) || defined(__Userspace__)) +#if !(defined(_WIN32) || defined(__Userspace__)) if (p == NULL) { /* Can't get proc for Net/Open BSD */ SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); @@ -7319,58 +7732,25 @@ sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp, return; } #endif - *error = sctp_inpcb_bind(so, addr_touse, NULL, p); + *error = sctp_inpcb_bind(so, addr_to_use, NULL, p); return; } - /* - * No locks required here since bind and mgmt_ep_sa - * all do their own locking. If we do something for - * the FIX: below we may need to lock in that case. - */ - if (assoc_id == 0) { + /* Validate the incoming port. */ + if ((port != 0) && (port != inp->sctp_lport)) { + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); + *error = EINVAL; + return; + } + lep = sctp_pcb_findep(addr_to_use, 1, 0, vrf_id); + if (lep == NULL) { /* add the address */ - struct sctp_inpcb *lep; - struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse; - - /* validate the incoming port */ - if ((lsin->sin_port != 0) && - (lsin->sin_port != inp->sctp_lport)) { - SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); - *error = EINVAL; - return; - } else { - /* user specified 0 port, set it to existing port */ - lsin->sin_port = inp->sctp_lport; - } - - lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id); - if (lep != NULL) { - /* - * We must decrement the refcount - * since we have the ep already and - * are binding. No remove going on - * here. - */ - SCTP_INP_DECR_REF(lep); - } - if (lep == inp) { - /* already bound to it.. 
ok */ - return; - } else if (lep == NULL) { - ((struct sockaddr_in *)addr_touse)->sin_port = 0; - *error = sctp_addr_mgmt_ep_sa(inp, addr_touse, - SCTP_ADD_IP_ADDRESS, - vrf_id, NULL); - } else { + *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, + SCTP_ADD_IP_ADDRESS, vrf_id); + } else { + if (lep != inp) { *error = EADDRINUSE; } - if (*error) - return; - } else { - /* - * FIX: decide whether we allow assoc based - * bindx - */ + SCTP_INP_DECR_REF(lep); } } @@ -7380,15 +7760,15 @@ sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp, */ void sctp_bindx_delete_address(struct sctp_inpcb *inp, - struct sockaddr *sa, sctp_assoc_t assoc_id, - uint32_t vrf_id, int *error) + struct sockaddr *sa, uint32_t vrf_id, int *error) { - struct sockaddr *addr_touse; + struct sockaddr *addr_to_use; #if defined(INET) && defined(INET6) + struct sockaddr_in6 *sin6; struct sockaddr_in sin; #endif #ifdef SCTP_MVRF - int i, fnd = 0; + int i; #endif /* see if we're bound all already! */ @@ -7401,23 +7781,18 @@ sctp_bindx_delete_address(struct sctp_inpcb *inp, /* Is the VRF one we have */ for (i = 0; i < inp->num_vrfs; i++) { if (vrf_id == inp->m_vrf_ids[i]) { - fnd = 1; break; } } - if (!fnd) { + if (i == inp->num_vrfs) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); *error = EINVAL; return; } #endif - addr_touse = sa; + switch (sa->sa_family) { #ifdef INET6 - if (sa->sa_family == AF_INET6) { -#ifdef INET - struct sockaddr_in6 *sin6; -#endif - + case AF_INET6: #ifdef HAVE_SA_LEN if (sa->sa_len != sizeof(struct sockaddr_in6)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); @@ -7432,7 +7807,7 @@ sctp_bindx_delete_address(struct sctp_inpcb *inp, return; } #ifdef INET - sin6 = (struct sockaddr_in6 *)addr_touse; + sin6 = (struct sockaddr_in6 *)sa; if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && SCTP_IPV6_V6ONLY(inp)) { @@ -7442,13 +7817,17 @@ sctp_bindx_delete_address(struct sctp_inpcb *inp, return; } in6_sin6_2_sin(&sin, sin6); - addr_touse = (struct sockaddr *)&sin; + addr_to_use = (struct sockaddr *)&sin; + } else { + addr_to_use = sa; } +#else + addr_to_use = sa; #endif - } + break; #endif #ifdef INET - if (sa->sa_family == AF_INET) { + case AF_INET: #ifdef HAVE_SA_LEN if (sa->sa_len != sizeof(struct sockaddr_in)) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); @@ -7463,24 +7842,17 @@ sctp_bindx_delete_address(struct sctp_inpcb *inp, *error = EINVAL; return; } - } + addr_to_use = sa; + break; #endif - /* - * No lock required mgmt_ep_sa does its own locking. - * If the FIX: below is ever changed we may need to - * lock before calling association level binding. - */ - if (assoc_id == 0) { - /* delete the address */ - *error = sctp_addr_mgmt_ep_sa(inp, addr_touse, - SCTP_DEL_IP_ADDRESS, - vrf_id, NULL); - } else { - /* - * FIX: decide whether we allow assoc based - * bindx - */ + default: + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); + *error = EINVAL; + return; } + /* No lock required mgmt_ep_sa does its own locking. 
*/ + *error = sctp_addr_mgmt_ep_sa(inp, addr_to_use, SCTP_DEL_IP_ADDRESS, + vrf_id); } /* @@ -7494,7 +7866,7 @@ sctp_local_addr_count(struct sctp_tcb *stcb) #if defined(INET) int ipv4_local_scope, ipv4_addr_legal; #endif -#if defined (INET6) +#if defined(INET6) int local_scope, site_scope, ipv6_addr_legal; #endif #if defined(__Userspace__) @@ -7550,7 +7922,7 @@ sctp_local_addr_count(struct sctp_tcb *stcb) /* skip unspecified addrs */ continue; } -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred, &sin->sin_addr) != 0) { continue; @@ -7579,7 +7951,7 @@ sctp_local_addr_count(struct sctp_tcb *stcb) if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { continue; } -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred, &sin6->sin6_addr) != 0) { continue; @@ -7661,7 +8033,7 @@ sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_ { uint32_t saveindex, newindex; -#if defined(__Windows__) +#if defined(_WIN32) && !defined(__Userspace__) if (SCTP_BASE_SYSCTL(sctp_log) == NULL) { return; } @@ -7708,8 +8080,7 @@ sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_ } #endif -#if defined(__FreeBSD__) -#if __FreeBSD_version >= 800044 +#if defined(__FreeBSD__) && !defined(__Userspace__) static void sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp, const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED) @@ -7763,29 +8134,17 @@ sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp, * Since CSUM_DATA_VALID == CSUM_SCTP_VALID this would imply that * the HW also verified the SCTP checksum. Therefore, clear the bit. 
*/ -#if __FreeBSD_version > 1000049 SCTPDBG(SCTP_DEBUG_CRCOFFLOAD, "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n", m->m_pkthdr.len, if_name(m->m_pkthdr.rcvif), (int)m->m_pkthdr.csum_flags, CSUM_BITS); -#else - SCTPDBG(SCTP_DEBUG_CRCOFFLOAD, - "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%x.\n", - m->m_pkthdr.len, - if_name(m->m_pkthdr.rcvif), - m->m_pkthdr.csum_flags); -#endif m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID; iph = mtod(m, struct ip *); switch (iph->ip_v) { #ifdef INET case IPVERSION: -#if __FreeBSD_version >= 1000000 iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr)); -#else - iph->ip_len -= sizeof(struct udphdr); -#endif sctp_input_with_port(m, off, port); break; #endif @@ -7804,9 +8163,7 @@ sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp, out: m_freem(m); } -#endif -#if __FreeBSD_version >= 1100000 #ifdef INET static void sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED) @@ -7926,16 +8283,6 @@ sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ct } #endif } else { -#if defined(__FreeBSD__) && __FreeBSD_version < 500000 - /* - * XXX must be fixed for 5.x and higher, leave for - * 4.x - */ - if (PRC_IS_REDIRECT(cmd) && (inp != NULL)) { - in_rtchange((struct inpcb *)inp, - inetctlerrmap[cmd]); - } -#endif if ((stcb == NULL) && (inp != NULL)) { /* reduce ref-count */ SCTP_INP_WLOCK(inp); @@ -7997,7 +8344,7 @@ sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx #endif src.sin6_port = sh.src_port; src.sin6_addr = ip6cp->ip6c_ip6->ip6_src; -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { return; } @@ -8009,7 +8356,7 @@ sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx #endif dst.sin6_port = sh.dest_port; dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst; -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) { return; } @@ -8040,7 +8387,7 @@ sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx return; } } else { -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) if (ip6cp->ip6c_m->m_pkthdr.len >= ip6cp->ip6c_off + sizeof(struct udphdr) + sizeof(struct sctphdr) + @@ -8109,11 +8456,6 @@ sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx } #endif } else { -#if defined(__FreeBSD__) && __FreeBSD_version < 500000 - if (PRC_IS_REDIRECT(cmd) && (inp != NULL)) { - in6_rtchange(inp, inet6ctlerrmap[cmd]); - } -#endif if ((stcb == NULL) && (inp != NULL)) { /* reduce inp's ref-count */ SCTP_INP_WLOCK(inp); @@ -8126,7 +8468,6 @@ sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx } } #endif -#endif void sctp_over_udp_stop(void) @@ -8151,7 +8492,6 @@ sctp_over_udp_stop(void) int sctp_over_udp_start(void) { -#if __FreeBSD_version >= 800044 uint16_t port; int ret; #ifdef INET @@ -8190,9 +8530,7 @@ sctp_over_udp_start(void) /* Call the special UDP hook. 
*/ if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket), sctp_recv_udp_tunneled_packet, -#if __FreeBSD_version >= 1100000 sctp_recv_icmp_tunneled_packet, -#endif NULL))) { sctp_over_udp_stop(); return (ret); @@ -8218,9 +8556,7 @@ sctp_over_udp_start(void) /* Call the special UDP hook. */ if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket), sctp_recv_udp_tunneled_packet, -#if __FreeBSD_version >= 1100000 sctp_recv_icmp6_tunneled_packet, -#endif NULL))) { sctp_over_udp_stop(); return (ret); @@ -8237,9 +8573,6 @@ sctp_over_udp_start(void) } #endif return (0); -#else - return (ENOTSUP); -#endif } #endif @@ -8277,7 +8610,7 @@ sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3) } } -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) void sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu) { diff --git a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctputil.h b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctputil.h index 2b691cff7..d71494acc 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/netinet/sctputil.h +++ b/TMessagesProj/jni/third_party/usrsctplib/netinet/sctputil.h @@ -32,9 +32,9 @@ * THE POSSIBILITY OF SUCH DAMAGE. */ -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) #include -__FBSDID("$FreeBSD: head/sys/netinet/sctputil.h 352592 2019-09-22 10:40:15Z tuexen $"); +__FBSDID("$FreeBSD$"); #endif #ifndef _NETINET_SCTP_UTIL_H_ @@ -57,14 +57,13 @@ void sctp_m_freem(struct mbuf *m); #define sctp_m_freem m_freem #endif -#if defined(SCTP_LOCAL_TRACE_BUF) || defined(__APPLE__) +#if defined(SCTP_LOCAL_TRACE_BUF) void sctp_log_trace(uint32_t fr, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f); #endif #define sctp_get_associd(stcb) ((sctp_assoc_t)stcb->asoc.assoc_id) - /* * Function prototypes */ @@ -84,7 +83,7 @@ uint32_t sctp_select_initial_TSN(struct sctp_pcb *); uint32_t sctp_select_a_tag(struct sctp_inpcb *, uint16_t lport, uint16_t rport, int); -int sctp_init_asoc(struct sctp_inpcb *, struct sctp_tcb *, uint32_t, uint32_t, uint16_t); +int sctp_init_asoc(struct sctp_inpcb *, struct sctp_tcb *, uint32_t, uint32_t, uint32_t, uint16_t); void sctp_fill_random_store(struct sctp_pcb *); @@ -94,6 +93,14 @@ sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, void sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag); +/* + * NOTE: sctp_timer_start() will increment the reference count of any relevant + * structure the timer is referencing, in order to prevent a race condition + * between the timer executing and the structure being freed. + * + * When the timer fires or sctp_timer_stop() is called, these references are + * removed. 
+ */ void sctp_timer_start(int, struct sctp_inpcb *, struct sctp_tcb *, struct sctp_nets *); @@ -111,7 +118,7 @@ sctp_mtu_size_reset(struct sctp_inpcb *, struct sctp_association *, uint32_t); void sctp_wakeup_the_read_socket(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked -#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) +#if !(defined(__APPLE__) && !defined(__Userspace__)) SCTP_UNUSED #endif ); @@ -130,11 +137,7 @@ sctp_add_to_readq(struct sctp_inpcb *inp, struct sockbuf *sb, int end, int inpread_locked, - int so_locked -#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) - SCTP_UNUSED -#endif - ); + int so_locked); void sctp_iterator_worker(void); @@ -162,60 +165,45 @@ sctp_add_pad_tombuf(struct mbuf *, int); struct mbuf * sctp_pad_lastmbuf(struct mbuf *, int, struct mbuf *); -void sctp_ulp_notify(uint32_t, struct sctp_tcb *, uint32_t, void *, int -#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) - SCTP_UNUSED -#endif - ); +void sctp_ulp_notify(uint32_t, struct sctp_tcb *, uint32_t, void *, int); void sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp, struct sctp_inpcb *new_inp, struct sctp_tcb *stcb, int waitflags); - void sctp_stop_timers_for_shutdown(struct sctp_tcb *); -void sctp_report_all_outbound(struct sctp_tcb *, uint16_t, int, int -#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) - SCTP_UNUSED -#endif - ); +/* Stop all timers for association and remote addresses. */ +void sctp_stop_association_timers(struct sctp_tcb *, bool); + +void sctp_report_all_outbound(struct sctp_tcb *, uint16_t, int); int sctp_expand_mapping_array(struct sctp_association *, uint32_t); -void sctp_abort_notification(struct sctp_tcb *, uint8_t, uint16_t, - struct sctp_abort_chunk *, int -#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) - SCTP_UNUSED -#endif - ); +void sctp_abort_notification(struct sctp_tcb *, bool, bool, uint16_t, + struct sctp_abort_chunk *, int); /* We abort responding to an IP packet for some reason */ void sctp_abort_association(struct sctp_inpcb *, struct sctp_tcb *, struct mbuf *, int, struct sockaddr *, struct sockaddr *, struct sctphdr *, struct mbuf *, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) uint8_t, uint32_t, #endif uint32_t, uint16_t); - /* We choose to abort via user input */ void sctp_abort_an_association(struct sctp_inpcb *, struct sctp_tcb *, - struct mbuf *, int -#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) - SCTP_UNUSED -#endif -); + struct mbuf *, bool, int); void sctp_handle_ootb(struct mbuf *, int, int, struct sockaddr *, struct sockaddr *, struct sctphdr *, struct sctp_inpcb *, struct mbuf *, -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) uint8_t, uint32_t, uint16_t, #endif uint32_t, uint16_t); @@ -276,21 +264,16 @@ void sctp_print_address(struct sockaddr *); int sctp_release_pr_sctp_chunk(struct sctp_tcb *, struct sctp_tmit_chunk *, - uint8_t, int -#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) - SCTP_UNUSED -#endif -); + uint8_t, int); struct mbuf *sctp_generate_cause(uint16_t, char *); struct mbuf *sctp_generate_no_user_data_cause(uint32_t); void sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp, - struct sockaddr *sa, sctp_assoc_t assoc_id, - uint32_t vrf_id, int *error, void *p); -void sctp_bindx_delete_address(struct sctp_inpcb *inp, - struct sockaddr *sa, sctp_assoc_t assoc_id, - uint32_t vrf_id, int *error); + struct sockaddr *sa, uint32_t vrf_id, int *error, + void *p); +void 
sctp_bindx_delete_address(struct sctp_inpcb *inp, struct sockaddr *sa, + uint32_t vrf_id, int *error); int sctp_local_addr_count(struct sctp_tcb *stcb); @@ -352,11 +335,11 @@ do { \ } while (0) /* functions to start/stop udp tunneling */ -#if defined(__APPLE__) || defined(__FreeBSD__) +#if (defined(__APPLE__) || defined(__FreeBSD__)) && !defined(__Userspace__) void sctp_over_udp_stop(void); int sctp_over_udp_start(void); #endif -#if defined(__Windows__) +#if defined(_WIN32) && !defined(__Userspace__) void sctp_over_udp_restart(void); #endif @@ -378,7 +361,6 @@ void sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t, uint16_t, uint16_t, void sctp_log_nagle_event(struct sctp_tcb *stcb, int action); - #ifdef SCTP_MBUF_LOGGING void sctp_log_mb(struct mbuf *m, int from); @@ -412,7 +394,6 @@ void sctp_log_map(uint32_t, uint32_t, uint32_t, int); void sctp_print_mapping_array(struct sctp_association *asoc); void sctp_clr_stat_log(void); - #ifdef SCTP_AUDITING_ENABLED void sctp_auditing(int, struct sctp_inpcb *, struct sctp_tcb *, @@ -421,11 +402,16 @@ void sctp_audit_log(uint8_t, uint8_t); #endif uint32_t sctp_min_mtu(uint32_t, uint32_t, uint32_t); -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) && !defined(__Userspace__) void sctp_hc_set_mtu(union sctp_sockstore *, uint16_t, uint32_t); uint32_t sctp_hc_get_mtu(union sctp_sockstore *, uint16_t); #endif void sctp_set_state(struct sctp_tcb *, int); void sctp_add_substate(struct sctp_tcb *, int); +uint32_t sctp_ticks_to_msecs(uint32_t); +uint32_t sctp_msecs_to_ticks(uint32_t); +uint32_t sctp_ticks_to_secs(uint32_t); +uint32_t sctp_secs_to_ticks(uint32_t); + #endif /* _KERNEL */ #endif diff --git a/TMessagesProj/jni/third_party/usrsctplib/netinet6/sctp6_usrreq.c b/TMessagesProj/jni/third_party/usrsctplib/netinet6/sctp6_usrreq.c index 23b1813e7..b62354b2b 100644 --- a/TMessagesProj/jni/third_party/usrsctplib/netinet6/sctp6_usrreq.c +++ b/TMessagesProj/jni/third_party/usrsctplib/netinet6/sctp6_usrreq.c @@ -32,14 +32,14 @@ * THE POSSIBILITY OF SUCH DAMAGE. 
*/ -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) #include -__FBSDID("$FreeBSD: head/sys/netinet6/sctp6_usrreq.c 355264 2019-12-01 16:14:44Z tuexen $"); +__FBSDID("$FreeBSD$"); #endif #include #ifdef INET6 -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) #include #endif #include @@ -58,14 +58,11 @@ __FBSDID("$FreeBSD: head/sys/netinet6/sctp6_usrreq.c 355264 2019-12-01 16:14:44Z #include #include #include -#if !defined(__Userspace_os_Windows) +#if !defined(_WIN32) #include #include #endif -#if defined(__APPLE__) -#define APPLE_FILE_NO 9 -#endif -#if defined(__Panda__) || defined(__Userspace__) +#if defined(__Userspace__) int ip6_v6only=0; #endif #if defined(__Userspace__) @@ -73,7 +70,7 @@ int ip6_v6only=0; void in6_sin6_2_sin(struct sockaddr_in *sin, struct sockaddr_in6 *sin6) { -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) uint32_t temp; #endif memset(sin, 0, sizeof(*sin)); @@ -82,7 +79,7 @@ in6_sin6_2_sin(struct sockaddr_in *sin, struct sockaddr_in6 *sin6) #endif sin->sin_family = AF_INET; sin->sin_port = sin6->sin6_port; -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) temp = sin6->sin6_addr.s6_addr16[7]; temp = temp << 16; temp = temp | sin6->sin6_addr.s6_addr16[6]; @@ -108,20 +105,20 @@ void in6_sin_2_v4mapsin6(const struct sockaddr_in *sin, struct sockaddr_in6 *sin6) { memset(sin6, 0, sizeof(struct sockaddr_in6)); - sin6->sin6_family = AF_INET6; + sin6->sin6_family = AF_INET6; #ifdef HAVE_SIN6_LEN sin6->sin6_len = sizeof(struct sockaddr_in6); #endif sin6->sin6_port = sin->sin_port; -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) ((uint32_t *)&sin6->sin6_addr)[0] = 0; ((uint32_t *)&sin6->sin6_addr)[1] = 0; ((uint32_t *)&sin6->sin6_addr)[2] = htonl(0xffff); ((uint32_t *)&sin6->sin6_addr)[3] = sin->sin_addr.s_addr; #else - sin6->sin6_addr.s6_addr32[0] = 0; + sin6->sin6_addr.s6_addr32[0] = 0; sin6->sin6_addr.s6_addr32[1] = 0; - sin6->sin6_addr.s6_addr32[2] = htonl(0xffff); + sin6->sin6_addr.s6_addr32[2] = htonl(0xffff); sin6->sin6_addr.s6_addr32[3] = sin->sin_addr.s_addr; #endif } @@ -132,8 +129,6 @@ in6_sin_2_v4mapsin6(const struct sockaddr_in *sin, struct sockaddr_in6 *sin6) int #if defined(__APPLE__) || defined(__FreeBSD__) sctp6_input_with_port(struct mbuf **i_pak, int *offp, uint16_t port) -#elif defined( __Panda__) -sctp6_input(pakhandle_type *i_pak) #else sctp6_input(struct mbuf **i_pak, int *offp, int proto) #endif @@ -153,25 +148,16 @@ sctp6_input(struct mbuf **i_pak, int *offp, int proto) uint8_t mflowtype; uint16_t fibnum; #endif -#if !(defined(__APPLE__) || defined (__FreeBSD__)) +#if !(defined(__APPLE__) || defined(__FreeBSD__)) uint16_t port = 0; #endif -#if defined(__Panda__) - /* This is Evil, but its the only way to make panda work right. 
*/ - iphlen = sizeof(struct ip6_hdr); -#else iphlen = *offp; -#endif if (SCTP_GET_PKT_VRFID(*i_pak, vrf_id)) { SCTP_RELEASE_PKT(*i_pak); return (IPPROTO_DONE); } m = SCTP_HEADER_TO_CHAIN(*i_pak); -#ifdef __Panda__ - SCTP_DETACH_HEADER_FROM_CHAIN(*i_pak); - (void)SCTP_RELEASE_HEADER(*i_pak); -#endif #ifdef SCTP_MBUF_LOGGING /* Log in any input mbufs */ if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { @@ -184,25 +170,11 @@ sctp6_input(struct mbuf **i_pak, int *offp, int proto) } #endif #if defined(__FreeBSD__) -#if __FreeBSD_version > 1000049 SCTPDBG(SCTP_DEBUG_CRCOFFLOAD, "sctp6_input(): Packet of length %d received on %s with csum_flags 0x%b.\n", m->m_pkthdr.len, if_name(m->m_pkthdr.rcvif), (int)m->m_pkthdr.csum_flags, CSUM_BITS); -#elif __FreeBSD_version >= 800000 - SCTPDBG(SCTP_DEBUG_CRCOFFLOAD, - "sctp6_input(): Packet of length %d received on %s with csum_flags 0x%x.\n", - m->m_pkthdr.len, - if_name(m->m_pkthdr.rcvif), - m->m_pkthdr.csum_flags); -#else - SCTPDBG(SCTP_DEBUG_CRCOFFLOAD, - "sctp6_input(): Packet of length %d received on %s with csum_flags 0x%x.\n", - m->m_pkthdr.len, - m->m_pkthdr.rcvif->if_xname, - m->m_pkthdr.csum_flags); -#endif #endif #if defined(__APPLE__) SCTPDBG(SCTP_DEBUG_CRCOFFLOAD, @@ -212,7 +184,7 @@ sctp6_input(struct mbuf **i_pak, int *offp, int proto) m->m_pkthdr.rcvif->if_unit, m->m_pkthdr.csum_flags); #endif -#if defined(__Windows__) +#if defined(_WIN32) && !defined(__Userspace__) SCTPDBG(SCTP_DEBUG_CRCOFFLOAD, "sctp6_input(): Packet of length %d received on %s with csum_flags 0x%x.\n", m->m_pkthdr.len, @@ -287,13 +259,14 @@ sctp6_input(struct mbuf **i_pak, int *offp, int proto) if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) { goto out; } - ecn_bits = ((ntohl(ip6->ip6_flow) >> 20) & 0x000000ff); -#if defined(__FreeBSD__) && __FreeBSD_version >= 800000 +#if defined(__FreeBSD__) + ecn_bits = IPV6_TRAFFIC_CLASS(ip6); if (m->m_pkthdr.csum_flags & CSUM_SCTP_VALID) { SCTP_STAT_INCR(sctps_recvhwcrc); compute_crc = 0; } else { #else + ecn_bits = ((ntohl(ip6->ip6_flow) >> 20) & 0x000000ff); if (SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) && (IN6_ARE_ADDR_EQUAL(&src.sin6_addr, &dst.sin6_addr))) { SCTP_STAT_INCR(sctps_recvhwcrc); @@ -327,7 +300,6 @@ sctp6_input(struct mbuf **i_pak, int *offp) return (sctp6_input_with_port(i_pak, offp, 0)); } #endif - #if defined(__FreeBSD__) int sctp6_input(struct mbuf **i_pak, int *offp, int proto SCTP_UNUSED) @@ -344,7 +316,7 @@ sctp6_notify(struct sctp_inpcb *inp, uint8_t icmp6_code, uint32_t next_mtu) { -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) struct socket *so; #endif int timer_stopped; @@ -369,8 +341,8 @@ sctp6_notify(struct sctp_inpcb *inp, case ICMP6_PARAM_PROB: /* Treat it like an ABORT. 
*/ if (icmp6_code == ICMP6_PARAMPROB_NEXTHEADER) { - sctp_abort_notification(stcb, 1, 0, NULL, SCTP_SO_NOT_LOCKED); -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) + sctp_abort_notification(stcb, true, false, 0, NULL, SCTP_SO_NOT_LOCKED); +#if defined(__APPLE__) so = SCTP_INP_SO(inp); atomic_add_int(&stcb->asoc.refcnt, 1); SCTP_TCB_UNLOCK(stcb); @@ -380,7 +352,7 @@ sctp6_notify(struct sctp_inpcb *inp, #endif (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_2); -#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) +#if defined(__APPLE__) SCTP_SOCKET_UNLOCK(so, 1); #endif } else { @@ -595,11 +567,6 @@ sctp6_ctlinput(int cmd, struct sockaddr *pktdst, void *d) } #endif } else { -#if defined(__FreeBSD__) && __FreeBSD_version < 500000 - if (PRC_IS_REDIRECT(cmd) && (inp != NULL)) { - in6_rtchange(inp, inet6ctlerrmap[cmd]); - } -#endif if ((stcb == NULL) && (inp != NULL)) { /* reduce inp's ref-count */ SCTP_INP_WLOCK(inp); @@ -618,7 +585,7 @@ sctp6_ctlinput(int cmd, struct sockaddr *pktdst, void *d) * this routine can probably be collasped into the one in sctp_userreq.c * since they do the same thing and now we lookup with a sockaddr */ -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) static int sctp6_getcred(SYSCTL_HANDLER_ARGS) { @@ -630,16 +597,10 @@ sctp6_getcred(SYSCTL_HANDLER_ARGS) int error; uint32_t vrf_id; -#if defined(__FreeBSD__) || defined(__APPLE__) vrf_id = SCTP_DEFAULT_VRFID; -#else - vrf_id = panda_get_vrf_from_call(); /* from connectx call? */ -#endif -#if defined(__FreeBSD__) && __FreeBSD_version > 602000 +#if defined(__FreeBSD__) && !defined(__Userspace__) error = priv_check(req->td, PRIV_NETINET_GETCRED); -#elif defined(__FreeBSD__) && __FreeBSD_version >= 500000 - error = suser(req->td); #else error = suser(req->p); #endif @@ -694,34 +655,40 @@ out: return (error); } -SYSCTL_PROC(_net_inet6_sctp6, OID_AUTO, getcred, CTLTYPE_OPAQUE | CTLFLAG_RW, - 0, 0, - sctp6_getcred, "S,ucred", "Get the ucred of a SCTP6 connection"); - +SYSCTL_PROC(_net_inet6_sctp6, OID_AUTO, getcred, + CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_NEEDGIANT, + 0, 0, sctp6_getcred, "S,ucred", + "Get the ucred of a SCTP6 connection"); #endif /* This is the same as the sctp_abort() could be made common */ -#if (defined(__FreeBSD__) && __FreeBSD_version > 690000) || defined(__Windows__) -static void -#elif defined(__Panda__) || defined(__Userspace__) +#if defined(__Userspace__) int +#elif defined(__FreeBSD__) || defined(_WIN32) +static void #else static int #endif sctp6_abort(struct socket *so) { +#if defined(__FreeBSD__) && !defined(__Userspace__) + struct epoch_tracker et; +#endif struct sctp_inpcb *inp; uint32_t flags; inp = (struct sctp_inpcb *)so->so_pcb; if (inp == NULL) { SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); -#if (defined(__FreeBSD__) && __FreeBSD_version > 690000) || defined(__Windows__) +#if (defined(__FreeBSD__) || defined(_WIN32)) && !defined(__Userspace__) return; #else return (EINVAL); #endif } +#if defined(__FreeBSD__) && !defined(__Userspace__) + NET_EPOCH_ENTER(et); +#endif sctp_must_try_again: flags = inp->sctp_flags; #ifdef SCTP_LOG_CLOSING @@ -740,7 +707,7 @@ sctp6_abort(struct socket *so) * here for the accounting/select. */ SCTP_SB_CLEAR(so->so_rcv); -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) so->so_usecount--; #else /* Now null out the reference, we are completely detached. 
*/ @@ -753,20 +720,21 @@ sctp6_abort(struct socket *so) goto sctp_must_try_again; } } -#if (defined(__FreeBSD__) && __FreeBSD_version > 690000) || defined(__Windows__) +#if defined(__FreeBSD__) && !defined(__Userspace__) + NET_EPOCH_EXIT(et); return; #else return (0); #endif } -#if defined(__FreeBSD__) && __FreeBSD_version >= 500000 -static int -sctp6_attach(struct socket *so, int proto SCTP_UNUSED, struct thread *p SCTP_UNUSED) -#elif defined(__Panda__) || defined(__Userspace__) +#if defined(__Userspace__) int sctp6_attach(struct socket *so, int proto SCTP_UNUSED, uint32_t vrf_id) -#elif defined(__Windows__) +#elif defined(__FreeBSD__) +static int +sctp6_attach(struct socket *so, int proto SCTP_UNUSED, struct thread *p SCTP_UNUSED) +#elif defined(_WIN32) static int sctp6_attach(struct socket *so, int proto SCTP_UNUSED, PKTHREAD p SCTP_UNUSED) #else @@ -776,7 +744,7 @@ sctp6_attach(struct socket *so, int proto SCTP_UNUSED, struct proc *p SCTP_UNUSE { int error; struct sctp_inpcb *inp; -#if !defined(__Panda__) && !defined(__Userspace__) +#if !defined(__Userspace__) uint32_t vrf_id = SCTP_DEFAULT_VRFID; #endif @@ -798,15 +766,9 @@ sctp6_attach(struct socket *so, int proto SCTP_UNUSED, struct proc *p SCTP_UNUSE SCTP_INP_WLOCK(inp); inp->sctp_flags |= SCTP_PCB_FLAGS_BOUND_V6; /* I'm v6! */ -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) || defined(__Userspace__) inp->ip_inp.inp.inp_vflag |= INP_IPV6; -#else - inp->inp_vflag |= INP_IPV6; -#endif -#if !defined(__Panda__) inp->ip_inp.inp.in6p_hops = -1; /* use kernel default */ inp->ip_inp.inp.in6p_cksum = -1; /* just to be sure */ -#endif #ifdef INET /* * XXX: ugly!! IPv4 TTL initialization is necessary for an IPv6 @@ -819,19 +781,19 @@ sctp6_attach(struct socket *so, int proto SCTP_UNUSED, struct proc *p SCTP_UNUSE return (0); } -#if defined(__FreeBSD__) && __FreeBSD_version >= 500000 -static int -sctp6_bind(struct socket *so, struct sockaddr *addr, struct thread *p) -{ -#elif defined(__FreeBSD__) || defined(__APPLE__) -static int -sctp6_bind(struct socket *so, struct sockaddr *addr, struct proc *p) -{ -#elif defined(__Panda__) || defined(__Userspace__) +#if defined(__Userspace__) int sctp6_bind(struct socket *so, struct sockaddr *addr, void * p) { -#elif defined(__Windows__) +#elif defined(__FreeBSD__) +static int +sctp6_bind(struct socket *so, struct sockaddr *addr, struct thread *p) +{ +#elif defined(__APPLE__) +static int +sctp6_bind(struct socket *so, struct sockaddr *addr, struct proc *p) +{ +#elif defined(_WIN32) static int sctp6_bind(struct socket *so, struct sockaddr *addr, PKTHREAD p) { @@ -852,7 +814,7 @@ sctp6_bind(struct socket *so, struct mbuf *nam, struct proc *p) return (EINVAL); } -#if !defined(__Windows__) +#if !(defined(_WIN32) && !defined(__Userspace__)) if (addr) { switch (addr->sa_family) { #ifdef INET @@ -881,27 +843,16 @@ sctp6_bind(struct socket *so, struct mbuf *nam, struct proc *p) } } #endif -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) || defined(__Userspace__) vflagsav = inp->ip_inp.inp.inp_vflag; inp->ip_inp.inp.inp_vflag &= ~INP_IPV4; inp->ip_inp.inp.inp_vflag |= INP_IPV6; -#else - vflagsav = inp->inp_vflag; - inp->inp_vflag &= ~INP_IPV4; - inp->inp_vflag |= INP_IPV6; -#endif if ((addr != NULL) && (SCTP_IPV6_V6ONLY(inp) == 0)) { switch (addr->sa_family) { #ifdef INET case AF_INET: /* binding v4 addr to v6 socket, so reset flags */ -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) || defined(__Userspace__) inp->ip_inp.inp.inp_vflag |= 
INP_IPV4; inp->ip_inp.inp.inp_vflag &= ~INP_IPV6; -#else - inp->inp_vflag |= INP_IPV4; - inp->inp_vflag &= ~INP_IPV6; -#endif break; #endif #ifdef INET6 @@ -912,24 +863,15 @@ sctp6_bind(struct socket *so, struct mbuf *nam, struct proc *p) sin6_p = (struct sockaddr_in6 *)addr; if (IN6_IS_ADDR_UNSPECIFIED(&sin6_p->sin6_addr)) { -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) || defined(__Userspace__) inp->ip_inp.inp.inp_vflag |= INP_IPV4; -#else - inp->inp_vflag |= INP_IPV4; -#endif } #ifdef INET if (IN6_IS_ADDR_V4MAPPED(&sin6_p->sin6_addr)) { struct sockaddr_in sin; in6_sin6_2_sin(&sin, sin6_p); -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) || defined(__Userspace__) inp->ip_inp.inp.inp_vflag |= INP_IPV4; inp->ip_inp.inp.inp_vflag &= ~INP_IPV6; -#else - inp->inp_vflag |= INP_IPV4; - inp->inp_vflag &= ~INP_IPV6; -#endif error = sctp_inpcb_bind(so, (struct sockaddr *)&sin, NULL, p); goto out; } @@ -965,16 +907,11 @@ sctp6_bind(struct socket *so, struct mbuf *nam, struct proc *p) error = sctp_inpcb_bind(so, addr, NULL, p); out: if (error != 0) -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) || defined(__Userspace__) inp->ip_inp.inp.inp_vflag = vflagsav; -#else - inp->inp_vflag = vflagsav; -#endif return (error); } - -#if (defined(__FreeBSD__) && __FreeBSD_version > 690000) || defined(__Windows__) || defined(__Userspace__) +#if defined(__FreeBSD__) || defined(_WIN32) || defined(__Userspace__) #if !defined(__Userspace__) static void #else @@ -988,9 +925,7 @@ sctp6_close(struct socket *so) /* This could be made common with sctp_detach() since they are identical */ #else -#if !defined(__Panda__) static -#endif int sctp6_detach(struct socket *so) { @@ -1004,7 +939,7 @@ sctp6_detach(struct socket *so) #endif -#if !defined(__Panda__) && !defined(__Userspace__) +#if !defined(__Userspace__) static #endif int @@ -1013,25 +948,22 @@ sctp6_disconnect(struct socket *so) return (sctp_disconnect(so)); } - int -#if defined(__FreeBSD__) && __FreeBSD_version >= 500000 +#if defined(__FreeBSD__) && !defined(__Userspace__) sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, struct mbuf *control, struct thread *p); - #else sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, struct mbuf *control, struct proc *p); - #endif -#if !defined(__Panda__) && !defined(__Windows__) && !defined(__Userspace__) -#if defined(__FreeBSD__) && __FreeBSD_version >= 500000 +#if !defined(_WIN32) && !defined(__Userspace__) +#if defined(__FreeBSD__) static int sctp6_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, struct mbuf *control, struct thread *p) { -#elif defined(__FreeBSD__) || defined(__APPLE__) +#elif defined(__APPLE__) static int sctp6_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, struct mbuf *control, struct proc *p) @@ -1077,6 +1009,46 @@ sctp6_send(struct socket *so, int flags, struct mbuf *m, struct mbuf *nam, SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EDESTADDRREQ); return (EDESTADDRREQ); } + switch (addr->sa_family) { +#ifdef INET + case AF_INET: +#if defined(HAVE_SA_LEN) + if (addr->sa_len != sizeof(struct sockaddr_in)) { + if (control) { + SCTP_RELEASE_PKT(control); + control = NULL; + } + SCTP_RELEASE_PKT(m); + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); + return (EINVAL); + } +#endif + break; +#endif +#ifdef INET6 + case AF_INET6: +#if defined(HAVE_SA_LEN) + if (addr->sa_len != sizeof(struct 
sockaddr_in6)) { + if (control) { + SCTP_RELEASE_PKT(control); + control = NULL; + } + SCTP_RELEASE_PKT(m); + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); + return (EINVAL); + } +#endif + break; +#endif + default: + if (control) { + SCTP_RELEASE_PKT(control); + control = NULL; + } + SCTP_RELEASE_PKT(m); + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); + return (EINVAL); + } #ifdef INET sin6 = (struct sockaddr_in6 *)addr; if (SCTP_IPV6_V6ONLY(inp)) { @@ -1085,15 +1057,26 @@ sctp6_send(struct socket *so, int flags, struct mbuf *m, struct mbuf *nam, * v4 addr or v4-mapped addr */ if (addr->sa_family == AF_INET) { + if (control) { + SCTP_RELEASE_PKT(control); + control = NULL; + } + SCTP_RELEASE_PKT(m); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); return (EINVAL); } if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { + if (control) { + SCTP_RELEASE_PKT(control); + control = NULL; + } + SCTP_RELEASE_PKT(m); SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); return (EINVAL); } } - if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { + if ((addr->sa_family == AF_INET6) && + IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { struct sockaddr_in sin; /* convert v4-mapped into v4 addr and send */ @@ -1119,7 +1102,7 @@ connected_type: inp->pkt_last = inp->pkt = m; } if ( -#if defined(__FreeBSD__) || defined(__APPLE__) +#if (defined(__FreeBSD__) || defined(__APPLE__)) && !defined(__Userspace__) /* FreeBSD and MacOSX uses a flag passed */ ((flags & PRUS_MORETOCOME) == 0) #else @@ -1134,9 +1117,18 @@ connected_type: * optionaly switch back to this code (by changing back the * defininitions but this is not advisable. */ +#if defined(__FreeBSD__) && !defined(__Userspace__) + struct epoch_tracker et; +#endif int ret; +#if defined(__FreeBSD__) && !defined(__Userspace__) + NET_EPOCH_ENTER(et); +#endif ret = sctp_output(inp, inp->pkt, addr, inp->control, p, flags); +#if defined(__FreeBSD__) && !defined(__Userspace__) + NET_EPOCH_EXIT(et); +#endif inp->pkt = NULL; inp->control = NULL; return (ret); @@ -1146,32 +1138,31 @@ connected_type: } #endif -#if defined(__FreeBSD__) && __FreeBSD_version >= 500000 -static int -sctp6_connect(struct socket *so, struct sockaddr *addr, struct thread *p) -{ -#elif defined(__FreeBSD__) || defined(__APPLE__) -static int -sctp6_connect(struct socket *so, struct sockaddr *addr, struct proc *p) -{ -#elif defined(__Panda__) -int -sctp6_connect(struct socket *so, struct sockaddr *addr, void *p) -{ -#elif defined(__Windows__) -static int -sctp6_connect(struct socket *so, struct sockaddr *addr, PKTHREAD p) -{ -#elif defined(__Userspace__) +#if defined(__Userspace__) int sctp6_connect(struct socket *so, struct sockaddr *addr) { void *p = NULL; +#elif defined(__FreeBSD__) +static int +sctp6_connect(struct socket *so, struct sockaddr *addr, struct thread *p) +{ +#elif defined(__APPLE__) +static int +sctp6_connect(struct socket *so, struct sockaddr *addr, struct proc *p) +{ +#elif defined(_WIN32) +static int +sctp6_connect(struct socket *so, struct sockaddr *addr, PKTHREAD p) +{ #else static int sctp6_connect(struct socket *so, struct mbuf *nam, struct proc *p) { struct sockaddr *addr = mtod(nam, struct sockaddr *); +#endif +#if defined(__FreeBSD__) && !defined(__Userspace__) + struct epoch_tracker et; #endif uint32_t vrf_id; int error = 0; @@ -1192,7 +1183,7 @@ sctp6_connect(struct socket *so, struct mbuf *nam, struct proc *p) SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EINVAL); return (EINVAL); } -#if 
!defined(__Windows__) +#if !(defined(_WIN32) && !defined(__Userspace__)) switch (addr->sa_family) { #ifdef INET case AF_INET: @@ -1263,7 +1254,8 @@ sctp6_connect(struct socket *so, struct mbuf *nam, struct proc *p) return (EINVAL); } } - if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { + if ((addr->sa_family == AF_INET6) && + IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { /* convert v4-mapped into v4 addr */ in6_sin6_2_sin(&store.sin, sin6); addr = &store.sa; @@ -1297,36 +1289,33 @@ sctp6_connect(struct socket *so, struct mbuf *nam, struct proc *p) return (EALREADY); } /* We are GOOD to go */ - stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id, - inp->sctp_ep.pre_open_stream_count, - inp->sctp_ep.port, p, - SCTP_INITIALIZE_AUTH_PARAMS); + stcb = sctp_aloc_assoc_connected(inp, addr, &error, 0, 0, vrf_id, + inp->sctp_ep.pre_open_stream_count, + inp->sctp_ep.port, p, + SCTP_INITIALIZE_AUTH_PARAMS); SCTP_ASOC_CREATE_UNLOCK(inp); if (stcb == NULL) { /* Gak! no memory */ return (error); } - if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { - stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED; - /* Set the connected flag so we can queue data */ - soisconnecting(so); - } SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT); (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); +#if defined(__FreeBSD__) && !defined(__Userspace__) + NET_EPOCH_ENTER(et); +#endif sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED); SCTP_TCB_UNLOCK(stcb); +#if defined(__FreeBSD__) && !defined(__Userspace__) + NET_EPOCH_EXIT(et); +#endif return (error); } static int -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) +#if !defined(__Userspace__) sctp6_getaddr(struct socket *so, struct sockaddr **addr) { struct sockaddr_in6 *sin6; -#elif defined(__Panda__) -sctp6_getaddr(struct socket *so, struct sockaddr *addr) -{ - struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr; #else sctp6_getaddr(struct socket *so, struct mbuf *nam) { @@ -1336,19 +1325,17 @@ sctp6_getaddr(struct socket *so, struct mbuf *nam) uint32_t vrf_id; struct sctp_ifa *sctp_ifa; -#ifdef SCTP_KAME +#if defined(SCTP_KAME) && defined(SCTP_EMBEDDED_V6_SCOPE) int error; -#endif /* SCTP_KAME */ +#endif /* * Do the malloc first in case it blocks. 
*/ -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) +#if !defined(__Userspace__) SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof(*sin6)); if (sin6 == NULL) return (ENOMEM); -#elif defined(__Panda__) - memset(sin6, 0, sizeof(*sin6)); #else SCTP_BUF_LEN(nam) = sizeof(*sin6); memset(sin6, 0, sizeof(*sin6)); @@ -1360,7 +1347,7 @@ sctp6_getaddr(struct socket *so, struct mbuf *nam) inp = (struct sctp_inpcb *)so->so_pcb; if (inp == NULL) { -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) +#if !defined(__Userspace__) SCTP_FREE_SONAME(sin6); #endif SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, ECONNRESET); @@ -1378,7 +1365,7 @@ sctp6_getaddr(struct socket *so, struct mbuf *nam) stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb == NULL) { SCTP_INP_RUNLOCK(inp); -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) +#if !defined(__Userspace__) SCTP_FREE_SONAME(sin6); #endif SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, ENOENT); @@ -1400,7 +1387,7 @@ sctp6_getaddr(struct socket *so, struct mbuf *nam) if ((!fnd) || (sin_a6 == NULL)) { /* punt */ SCTP_INP_RUNLOCK(inp); -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) +#if !defined(__Userspace__) SCTP_FREE_SONAME(sin6); #endif SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, ENOENT); @@ -1431,7 +1418,7 @@ sctp6_getaddr(struct socket *so, struct mbuf *nam) } } if (!fnd) { -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) +#if !defined(__Userspace__) SCTP_FREE_SONAME(sin6); #endif SCTP_INP_RUNLOCK(inp); @@ -1455,21 +1442,17 @@ sctp6_getaddr(struct socket *so, struct mbuf *nam) sin6->sin6_scope_id = 0; /* XXX */ #endif /* SCTP_KAME */ #endif /* SCTP_EMBEDDED_V6_SCOPE */ -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) +#if !defined(__Userspace__) (*addr) = (struct sockaddr *)sin6; #endif return (0); } static int -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) +#if !defined(__Userspace__) sctp6_peeraddr(struct socket *so, struct sockaddr **addr) { struct sockaddr_in6 *sin6; -#elif defined(__Panda__) -sctp6_peeraddr(struct socket *so, struct sockaddr *addr) -{ - struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr; #else sctp6_peeraddr(struct socket *so, struct mbuf *nam) { @@ -1485,12 +1468,10 @@ sctp6_peeraddr(struct socket *so, struct mbuf *nam) #endif /* Do the malloc first in case it blocks. 
*/ -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) +#if !defined(__Userspace__) SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof *sin6); if (sin6 == NULL) return (ENOMEM); -#elif defined(__Panda__) - memset(sin6, 0, sizeof(*sin6)); #else SCTP_BUF_LEN(nam) = sizeof(*sin6); memset(sin6, 0, sizeof(*sin6)); @@ -1504,7 +1485,7 @@ sctp6_peeraddr(struct socket *so, struct mbuf *nam) if ((inp == NULL) || ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) { /* UDP type and listeners will drop out here */ -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) +#if !defined(__Userspace__) SCTP_FREE_SONAME(sin6); #endif SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, ENOTCONN); @@ -1517,7 +1498,7 @@ sctp6_peeraddr(struct socket *so, struct mbuf *nam) } SCTP_INP_RUNLOCK(inp); if (stcb == NULL) { -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) +#if !defined(__Userspace__) SCTP_FREE_SONAME(sin6); #endif SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, ECONNRESET); @@ -1536,7 +1517,7 @@ sctp6_peeraddr(struct socket *so, struct mbuf *nam) SCTP_TCB_UNLOCK(stcb); if (!fnd) { /* No IPv4 address */ -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) +#if !defined(__Userspace__) SCTP_FREE_SONAME(sin6); #endif SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, ENOENT); @@ -1545,7 +1526,7 @@ sctp6_peeraddr(struct socket *so, struct mbuf *nam) #ifdef SCTP_EMBEDDED_V6_SCOPE #ifdef SCTP_KAME if ((error = sa6_recoverscope(sin6)) != 0) { -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) +#if !defined(__Userspace__) SCTP_FREE_SONAME(sin6); #endif SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, error); @@ -1555,21 +1536,16 @@ sctp6_peeraddr(struct socket *so, struct mbuf *nam) in6_recoverscope(sin6, &sin6->sin6_addr, NULL); #endif /* SCTP_KAME */ #endif /* SCTP_EMBEDDED_V6_SCOPE */ -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) +#if !defined(__Userspace__) *addr = (struct sockaddr *)sin6; #endif return (0); } -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) +#if !defined(__Userspace__) static int sctp6_in6getaddr(struct socket *so, struct sockaddr **nam) { -#elif defined(__Panda__) -int -sctp6_in6getaddr(struct socket *so, struct sockaddr *nam, uint32_t *namelen) -{ - struct sockaddr *addr = nam; #elif defined(__Userspace__) int sctp6_in6getaddr(struct socket *so, struct mbuf *nam) @@ -1597,7 +1573,7 @@ sctp6_in6getaddr(struct socket *so, struct mbuf *nam) error = sctp6_getaddr(so, nam); #ifdef INET if (error) { -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) +#if !defined(__Userspace__) struct sockaddr_in6 *sin6; #else struct sockaddr_in6 sin6; @@ -1608,7 +1584,7 @@ sctp6_in6getaddr(struct socket *so, struct mbuf *nam) if (error) { return (error); } -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) +#if !defined(__Userspace__) SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof *sin6); if (sin6 == NULL) { SCTP_FREE_SONAME(*nam); @@ -1623,23 +1599,14 @@ sctp6_in6getaddr(struct socket *so, struct mbuf *nam) memcpy(addr, &sin6, sizeof(struct sockaddr_in6)); #endif } -#endif -#if defined(__Panda__) - *namelen = nam->sa_len; #endif return (error); } - -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) +#if !defined(__Userspace__) static int sctp6_getpeeraddr(struct socket *so, struct sockaddr **nam) { -#elif defined(__Panda__) -int 
-sctp6_getpeeraddr(struct socket *so, struct sockaddr *nam, uint32_t *namelen) -{ - struct sockaddr *addr = (struct sockaddr *)nam; #elif defined(__Userspace__) int sctp6_getpeeraddr(struct socket *so, struct mbuf *nam) @@ -1669,7 +1636,7 @@ sctp6_getpeeraddr(struct socket *so, struct mbuf *nam) error = sctp6_peeraddr(so, nam); #ifdef INET if (error) { -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) +#if !defined(__Userspace__) struct sockaddr_in6 *sin6; #else struct sockaddr_in6 sin6; @@ -1680,7 +1647,7 @@ sctp6_getpeeraddr(struct socket *so, struct mbuf *nam) if (error) { return (error); } -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) +#if !defined(__Userspace__) SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof *sin6); if (sin6 == NULL) { SCTP_FREE_SONAME(*nam); @@ -1695,14 +1662,11 @@ sctp6_getpeeraddr(struct socket *so, struct mbuf *nam) memcpy(addr, &sin6, sizeof(struct sockaddr_in6)); #endif } -#endif -#if defined(__Panda__) - *namelen = nam->sa_len; #endif return (error); } -#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) +#if !defined(__Userspace__) struct pr_usrreqs sctp6_usrreqs = { #if defined(__FreeBSD__) .pru_abort = sctp6_abort, @@ -1711,15 +1675,10 @@ struct pr_usrreqs sctp6_usrreqs = { .pru_bind = sctp6_bind, .pru_connect = sctp6_connect, .pru_control = in6_control, -#if __FreeBSD_version >= 690000 .pru_close = sctp6_close, .pru_detach = sctp6_close, .pru_sopoll = sopoll_generic, .pru_flush = sctp_flush, -#else - .pru_detach = sctp6_detach, - .pru_sopoll = sopoll, -#endif .pru_disconnect = sctp6_disconnect, .pru_listen = sctp_listen, .pru_peeraddr = sctp6_getpeeraddr, @@ -1728,7 +1687,7 @@ struct pr_usrreqs sctp6_usrreqs = { .pru_sockaddr = sctp6_in6getaddr, .pru_sosend = sctp_sosend, .pru_soreceive = sctp_soreceive -#elif defined(__APPLE__) +#elif defined(__APPLE__) && !defined(__Userspace__) .pru_abort = sctp6_abort, .pru_accept = sctp_accept, .pru_attach = sctp6_attach, @@ -1749,7 +1708,7 @@ struct pr_usrreqs sctp6_usrreqs = { .pru_sosend = sctp_sosend, .pru_soreceive = sctp_soreceive, .pru_sopoll = sopoll -#elif defined(__Windows__) +#elif defined(_WIN32) && !defined(__Userspace__) sctp6_abort, sctp_accept, sctp6_attach, @@ -1776,7 +1735,7 @@ struct pr_usrreqs sctp6_usrreqs = { #endif }; -#elif !defined(__Panda__) && !defined(__Userspace__) +#elif !defined(__Userspace__) int sctp6_usrreq(so, req, m, nam, control, p) struct socket *so; @@ -1784,22 +1743,22 @@ sctp6_usrreq(so, req, m, nam, control, p) struct mbuf *m, *nam, *control; struct proc *p; { - int s; - int error = 0; + int error; int family; - uint32_t vrf_id; + family = so->so_proto->pr_domain->dom_family; if (req == PRU_CONTROL) { switch (family) { case PF_INET: error = in_control(so, (long)m, (caddr_t)nam, - (struct ifnet *)control - ); + (struct ifnet *)control); + break; #ifdef INET6 case PF_INET6: error = in6_control(so, (long)m, (caddr_t)nam, (struct ifnet *)control, p); + break; #endif default: SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, EAFNOSUPPORT); @@ -1883,6 +1842,7 @@ sctp6_usrreq(so, req, m, nam, control, p) error = 0; break; default: + error = 0; break; } return (error); diff --git a/TMessagesProj/jni/third_party/usrsctplib/netinet6/sctp6_var.h b/TMessagesProj/jni/third_party/usrsctplib/netinet6/sctp6_var.h index e10453dae..56a6c3af3 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/netinet6/sctp6_var.h +++ b/TMessagesProj/jni/third_party/usrsctplib/netinet6/sctp6_var.h @@ -32,7 +32,7 @@ * THE 
POSSIBILITY OF SUCH DAMAGE. */ -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) && !defined(__Userspace__) #include __FBSDID("$FreeBSD: head/sys/netinet6/sctp6_var.h 317457 2017-04-26 19:26:40Z tuexen $"); #endif @@ -49,40 +49,28 @@ extern void in6_sin_2_v4mapsin6(const struct sockaddr_in *, struct sockaddr_in6 #endif #if defined(_KERNEL) -#if defined(__FreeBSD__) || (__APPLE__) || defined(__Windows__) +#if !defined(__Userspace__) SYSCTL_DECL(_net_inet6_sctp6); extern struct pr_usrreqs sctp6_usrreqs; #else int sctp6_usrreq(struct socket *, int, struct mbuf *, struct mbuf *, struct mbuf *); #endif -#if defined(__APPLE__) +#if defined(__APPLE__) && !defined(__Userspace__) int sctp6_input(struct mbuf **, int *); int sctp6_input_with_port(struct mbuf **, int *, uint16_t); -#elif defined(__Panda__) -int sctp6_input (pakhandle_type *); -#elif defined(__FreeBSD__) && __FreeBSD_version < 902000 -int sctp6_input __P((struct mbuf **, int *, int)); -int sctp6_input_with_port __P((struct mbuf **, int *, uint16_t)); #else int sctp6_input(struct mbuf **, int *, int); int sctp6_input_with_port(struct mbuf **, int *, uint16_t); #endif -#if defined(__FreeBSD__) && __FreeBSD_version < 902000 -int sctp6_output -__P((struct sctp_inpcb *, struct mbuf *, struct sockaddr *, - struct mbuf *, struct proc *)); -void sctp6_ctlinput __P((int, struct sockaddr *, void *)); -#else int sctp6_output(struct sctp_inpcb *, struct mbuf *, struct sockaddr *, struct mbuf *, struct proc *); -#if defined(__APPLE__) && !defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION) && !defined(APPLE_ELCAPITAN) +#if defined(__APPLE__) && !defined(__Userspace__) && !defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION) && !defined(APPLE_ELCAPITAN) void sctp6_ctlinput(int, struct sockaddr *, void *, struct ifnet * SCTP_UNUSED); #else void sctp6_ctlinput(int, struct sockaddr *, void *); #endif -#endif -#if !(defined(__FreeBSD__) || defined(__APPLE__)) +#if !((defined(__FreeBSD__) || defined(__APPLE__)) && !defined(__Userspace__)) extern void in6_sin_2_v4mapsin6(struct sockaddr_in *, struct sockaddr_in6 *); extern void in6_sin6_2_sin(struct sockaddr_in *, struct sockaddr_in6 *); extern void in6_sin6_2_sin_in_sock(struct sockaddr *); diff --git a/TMessagesProj/jni/third_party/usrsctplib/user_atomic.h b/TMessagesProj/jni/third_party/usrsctplib/user_atomic.h index 77ea77b67..6a59587ef 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/user_atomic.h +++ b/TMessagesProj/jni/third_party/usrsctplib/user_atomic.h @@ -42,8 +42,8 @@ #include #include -#if defined(__Userspace_os_Darwin) || defined (__Userspace_os_Windows) -#if defined (__Userspace_os_Windows) +#if defined(__APPLE__) || defined(_WIN32) +#if defined(_WIN32) #define atomic_add_int(addr, val) InterlockedExchangeAdd((LPLONG)addr, (LONG)val) #define atomic_fetchadd_int(addr, val) InterlockedExchangeAdd((LPLONG)addr, (LONG)val) #define atomic_subtract_int(addr, val) InterlockedExchangeAdd((LPLONG)addr,-((LONG)val)) @@ -77,7 +77,7 @@ } \ } #endif -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) static void atomic_init(void) {} /* empty when we are not using atomic_mtx */ #else static inline void atomic_init(void) {} /* empty when we are not using atomic_mtx */ @@ -173,7 +173,7 @@ static inline void atomic_init() {} /* empty when we are not using atomic_mtx */ extern userland_mutex_t atomic_mtx; -#if defined (__Userspace_os_Windows) +#if defined(_WIN32) static inline void 
atomic_init() { InitializeCriticalSection(&atomic_mtx); } diff --git a/TMessagesProj/jni/third_party/usrsctplib/user_environment.c b/TMessagesProj/jni/third_party/usrsctplib/user_environment.c index cb77a220e..188771891 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/user_environment.c +++ b/TMessagesProj/jni/third_party/usrsctplib/user_environment.c @@ -30,18 +30,23 @@ /* __Userspace__ */ -#include -#if !defined (__Userspace_os_Windows) +#if defined(_WIN32) +#if !defined(_CRT_RAND_S) && !defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) +#define _CRT_RAND_S +#endif +#else #include #include #endif +#ifdef INVARIANTS +#include +#endif #include #include /* #include defines MIN */ #if !defined(MIN) #define MIN(arg1,arg2) ((arg1) < (arg2) ? (arg1) : (arg2)) #endif -#include #define uHZ 1000 @@ -64,50 +69,316 @@ userland_mutex_t atomic_mtx; * provide _some_ kind of randomness. This should only be used * inside other RNG's, like arc4random(9). */ -#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION -static int -read_random_phony(void *buf, int count) +#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) +#include + +void +init_random(void) { - memset(buf, 'A', count); - return (count); + return; } -#else -#if defined(__Userspace_os_FreeBSD) || defined(__Userspace_os_Darwin) -static int -read_random_phony(void *buf, int count) + +void +read_random(void *buf, size_t size) { - if (count >= 0) { - arc4random_buf(buf, count); + memset(buf, 'A', size); + return; +} + +void +finish_random(void) +{ + return; +} +/* This define can be used to optionally use OpenSSL's random number utility, + * which is capable of bypassing the chromium sandbox which normally would + * prevent opening files, including /dev/urandom. + */ +#elif defined(SCTP_USE_OPENSSL_RAND) +#include + +/* Requiring BoringSSL because it guarantees that RAND_bytes will succeed. */ +#ifndef OPENSSL_IS_BORINGSSL +#error Only BoringSSL is supported with SCTP_USE_OPENSSL_RAND. 
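/*
 * Illustrative sketch, not part of the patch: the rewrite of user_environment.c
 * above replaces the old `int read_random(void *buf, int count)` with a
 * three-call interface (its prototypes appear in the user_environment.h hunk
 * later in this patch).  init_random() does any one-time backend setup, such
 * as probing the getrandom syscall or opening /dev/urandom; read_random()
 * always fills the whole buffer and reports no error code; finish_random()
 * releases whatever the backend acquired.  The caller below is hypothetical
 * and only shows the intended call order; it would be linked against
 * usrsctplib to resolve the three symbols.
 */
#include <stddef.h>
#include <stdint.h>

void init_random(void);            /* one-time setup of the platform backend */
void read_random(void *, size_t);  /* fills the buffer completely; no return value */
void finish_random(void);          /* e.g. closes /dev/urandom if it was opened */

static uint32_t
example_draw_random_tag(void)
{
	uint32_t tag;

	/* read_random() loops internally until sizeof(tag) bytes are written. */
	read_random(&tag, sizeof(tag));
	return (tag);
}

int
example_rng_lifecycle(void)
{
	uint32_t tag;

	init_random();                  /* call once at library start-up */
	tag = example_draw_random_tag();
	finish_random();                /* call once at shutdown */
	return (tag != 0);
}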
+#endif + +void +init_random(void) +{ + return; +} + +void +read_random(void *buf, size_t size) +{ + RAND_bytes((uint8_t *)buf, size); + return; +} + +void +finish_random(void) +{ + return; +} +#elif defined(__FreeBSD__) || defined(__DragonFly__) || defined(__OpenBSD__) || defined(__NetBSD__) || defined(__APPLE__) || defined(__Bitrig__) +#include + +void +init_random(void) +{ + return; +} + +void +read_random(void *buf, size_t size) +{ + arc4random_buf(buf, size); + return; +} + +void +finish_random(void) +{ + return; +} +#elif defined(_WIN32) +#include + +void +init_random(void) +{ + return; +} + +void +read_random(void *buf, size_t size) +{ + unsigned int randval; + size_t position, remaining; + + position = 0; + while (position < size) { + if (rand_s(&randval) == 0) { + remaining = MIN(size - position, sizeof(unsigned int)); + memcpy((char *)buf + position, &randval, remaining); + position += sizeof(unsigned int); + } } - return (count); + return; } -#else -static int -read_random_phony(void *buf, int count) + +void +finish_random(void) { - uint32_t randval; - int size, i; - - /* srandom() is called in kern/init_main.c:proc0_post() */ - - /* Fill buf[] with random(9) output */ - for (i = 0; i < count; i+= (int)sizeof(uint32_t)) { - randval = random(); - size = MIN(count - i, (int)sizeof(uint32_t)); - memcpy(&((char *)buf)[i], &randval, (size_t)size); - } - - return (count); + return; } +#elif (defined(__ANDROID__) && (__ANDROID_API__ < 28)) || defined(__EMSCRIPTEN__) +#include + +static int fd = -1; + +void +init_random(void) +{ + fd = open("/dev/urandom", O_RDONLY); + return; +} + +void +read_random(void *buf, size_t size) +{ + size_t position; + ssize_t n; + + position = 0; + while (position < size) { + n = read(fd, (char *)buf + position, size - position); + if (n > 0) { + position += n; + } + } + return; +} + +void +finish_random(void) +{ + close(fd); + return; +} +#elif defined(__ANDROID__) && (__ANDROID_API__ >= 28) +#include + +void +init_random(void) +{ + return; +} + +void +read_random(void *buf, size_t size) +{ + size_t position; + ssize_t n; + + position = 0; + while (position < size) { + n = getrandom((char *)buf + position, size - position, 0); + if (n > 0) { + position += n; + } + } + return; +} + +void +finish_random(void) +{ + return; +} +#elif defined(__linux__) +#include +#include +#include + +#if defined(__has_feature) +#if __has_feature(memory_sanitizer) +void __msan_unpoison(void *, size_t); #endif #endif -static int (*read_func)(void *, int) = read_random_phony; +#ifdef __NR_getrandom +#if !defined(GRND_NONBLOCK) +#define GRND_NONBLOCK 1 +#endif +static int getrandom_available = 0; +#endif +static int fd = -1; -/* Userland-visible version of read_random */ -int -read_random(void *buf, int count) +void +init_random(void) { - return ((*read_func)(buf, count)); +#ifdef __NR_getrandom + char dummy; + ssize_t n = syscall(__NR_getrandom, &dummy, sizeof(dummy), GRND_NONBLOCK); + if (n > 0 || errno == EINTR || errno == EAGAIN) { + /* Either getrandom succeeded, was interrupted or is waiting for entropy; + * all of which mean the syscall is available. + */ + getrandom_available = 1; + } else { +#ifdef INVARIANTS + if (errno != ENOSYS) { + panic("getrandom syscall returned unexpected error: %d", errno); + } +#endif + /* If the syscall isn't available, fall back to /dev/urandom. 
*/ +#endif + fd = open("/dev/urandom", O_RDONLY); +#ifdef __NR_getrandom + } +#endif + return; } +void +read_random(void *buf, size_t size) +{ + size_t position; + ssize_t n; + + position = 0; + while (position < size) { +#ifdef __NR_getrandom + if (getrandom_available) { + /* Using syscall directly because getrandom isn't present in glibc < 2.25. + */ + n = syscall(__NR_getrandom, (char *)buf + position, size - position, 0); + if (n > 0) { +#if defined(__has_feature) +#if __has_feature(memory_sanitizer) + /* Need to do this because MSan doesn't realize that syscall has + * initialized the output buffer. + */ + __msan_unpoison(buf + position, n); +#endif +#endif + position += n; + } else if (errno != EINTR && errno != EAGAIN) { +#ifdef INVARIANTS + panic("getrandom syscall returned unexpected error: %d", errno); +#endif + } + } else +#endif /* __NR_getrandom */ + { + n = read(fd, (char *)buf + position, size - position); + if (n > 0) { + position += n; + } + } + } + return; +} + +void +finish_random(void) +{ + if (fd != -1) { + close(fd); + } + return; +} +#elif defined(__Fuchsia__) +#include + +void +init_random(void) +{ + return; +} + +void +read_random(void *buf, size_t size) +{ + zx_cprng_draw(buf, size); + return; +} + +void +finish_random(void) +{ + return; +} +#elif defined(__native_client__) +#include + +void +init_random(void) +{ + return; +} + +void +read_random(void *buf, size_t size) +{ + size_t position; + size_t n; + + position = 0; + while (position < size) { + if (nacl_secure_random((char *)buf + position, size - position, &n) == 0) + position += n; + } + } + return; +} + +void +finish_random(void) +{ + return; +} +#else +#error "Unknown platform. Please provide platform specific RNG." +#endif diff --git a/TMessagesProj/jni/third_party/usrsctplib/user_environment.h b/TMessagesProj/jni/third_party/usrsctplib/user_environment.h index 2b40ceebd..a545accba 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/user_environment.h +++ b/TMessagesProj/jni/third_party/usrsctplib/user_environment.h @@ -33,12 +33,12 @@ /* __Userspace__ */ #include -#ifdef __Userspace_os_FreeBSD +#ifdef __FreeBSD__ #ifndef _SYS_MUTEX_H_ #include #endif #endif -#if defined (__Userspace_os_Windows) +#if defined(_WIN32) #include "netinet/sctp_os_userspace.h" #endif @@ -63,11 +63,13 @@ extern int ipport_firstauto, ipport_lastauto; extern int nmbclusters; #if !defined(_MSC_VER) && !defined(__MINGW32__) -#define min(a,b) ((a)>(b)?(b):(a)) -#define max(a,b) ((a)>(b)?(a):(b)) +#define min(a,b) (((a)>(b))?(b):(a)) +#define max(a,b) (((a)>(b))?(a):(b)) #endif -extern int read_random(void *buf, int count); +void init_random(void); +void read_random(void *, size_t); +void finish_random(void); /* errno's may differ per OS. errno.h now included in sctp_os_userspace.h */ /* Source: /usr/src/sys/sys/errno.h */ @@ -82,14 +84,18 @@ extern int read_random(void *buf, int count); /* Source ip_output.c. 
extern'd in ip_var.h */ extern u_short ip_id; -#if defined(__Userspace_os_Linux) +#if defined(__linux__) #define IPV6_VERSION 0x60 #endif #if defined(INVARIANTS) #include -static inline void +#if defined(_WIN32) +static inline void __declspec(noreturn) +#else +static inline void __attribute__((__noreturn__)) +#endif terminate_non_graceful(void) { abort(); } diff --git a/TMessagesProj/jni/third_party/usrsctplib/user_inpcb.h b/TMessagesProj/jni/third_party/usrsctplib/user_inpcb.h index 2ac818c70..2e6e9334e 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/user_inpcb.h +++ b/TMessagesProj/jni/third_party/usrsctplib/user_inpcb.h @@ -10,7 +10,7 @@ * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. - * 4. Neither the name of the University nor the names of its contributors + * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * @@ -54,7 +54,7 @@ LIST_HEAD(inpcbporthead, inpcbport); * the following structure. */ struct in_addr_4in6 { - u_int32_t ia46_pad32[3]; + uint32_t ia46_pad32[3]; struct in_addr ia46_addr4; }; @@ -63,8 +63,8 @@ struct in_addr_4in6 { * some extra padding to accomplish this. */ struct in_endpoints { - u_int16_t ie_fport; /* foreign port */ - u_int16_t ie_lport; /* local port */ + uint16_t ie_fport; /* foreign port */ + uint16_t ie_lport; /* local port */ /* protocol dependent part, local and foreign addr */ union { /* foreign host table entry */ @@ -87,9 +87,9 @@ struct in_endpoints { * references. */ struct in_conninfo { - u_int8_t inc_flags; - u_int8_t inc_len; - u_int16_t inc_pad; /* XXX alignment for in_endpoints */ + uint8_t inc_flags; + uint8_t inc_len; + uint16_t inc_pad; /* XXX alignment for in_endpoints */ /* protocol dependent part */ struct in_endpoints inc_ie; }; @@ -110,7 +110,7 @@ struct inpcb { struct inpcbinfo *inp_pcbinfo; /* PCB list info */ struct socket *inp_socket; /* back pointer to socket */ - u_int32_t inp_flow; + uint32_t inp_flow; int inp_flags; /* generic IP/datagram flags */ u_char inp_vflag; /* IP version flag (v4/v6) */ @@ -329,7 +329,6 @@ struct inpcbinfo { #define INP_CHECK_SOCKAF(so, af) (INP_SOCKAF(so) == af) -/* #ifdef _KERNEL */ extern int ipport_reservedhigh; extern int ipport_reservedlow; extern int ipport_lowfirstauto; @@ -370,6 +369,5 @@ void ipport_tick(void *xtp); */ void db_print_inpcb(struct inpcb *inp, const char *name, int indent); -/* #endif _KERNEL */ #endif /* !_NETINET_IN_PCB_H_ */ diff --git a/TMessagesProj/jni/third_party/usrsctplib/user_ip6_var.h b/TMessagesProj/jni/third_party/usrsctplib/user_ip6_var.h index f5e4a60e4..b970fb8fa 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/user_ip6_var.h +++ b/TMessagesProj/jni/third_party/usrsctplib/user_ip6_var.h @@ -39,7 +39,7 @@ * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. - * 4. Neither the name of the University nor the names of its contributors + * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* @@ -60,16 +60,16 @@ #ifndef _USER_IP6_VAR_H_ #define _USER_IP6_VAR_H_ -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) struct ip6_hdr { union { struct ip6_hdrctl { - u_int32_t ip6_un1_flow; /* 20 bits of flow-ID */ - u_int16_t ip6_un1_plen; /* payload length */ - u_int8_t ip6_un1_nxt; /* next header */ - u_int8_t ip6_un1_hlim; /* hop limit */ + uint32_t ip6_un1_flow; /* 20 bits of flow-ID */ + uint16_t ip6_un1_plen; /* payload length */ + uint8_t ip6_un1_nxt; /* next header */ + uint8_t ip6_un1_hlim; /* hop limit */ } ip6_un1; - u_int8_t ip6_un2_vfc; /* 4 bits version, top 4 bits class */ + uint8_t ip6_un2_vfc; /* 4 bits version, top 4 bits class */ } ip6_ctlun; struct in6_addr ip6_src; /* source address */ struct in6_addr ip6_dst; /* destination address */ @@ -84,18 +84,16 @@ struct ip6_hdr { #define IPV6_VERSION 0x60 #endif -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) #define s6_addr16 u.Word #endif -#if !defined(__Userspace_os_Windows) -#if !defined(__Userspace_os_Linux) +#if !defined(_WIN32) && !defined(__linux__) && !defined(__EMSCRIPTEN__) #define s6_addr8 __u6_addr.__u6_addr8 #define s6_addr16 __u6_addr.__u6_addr16 #define s6_addr32 __u6_addr.__u6_addr32 #endif -#endif -#if !defined(__Userspace_os_FreeBSD) && !defined(__Userspace_os_OpenBSD) && !defined(__Userspace_os_DragonFly) +#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__DragonFly__) struct route_in6 { struct rtentry *ro_rt; struct llentry *ro_lle; diff --git a/TMessagesProj/jni/third_party/usrsctplib/user_ip_icmp.h b/TMessagesProj/jni/third_party/usrsctplib/user_ip_icmp.h index f6fd5487e..a993411a1 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/user_ip_icmp.h +++ b/TMessagesProj/jni/third_party/usrsctplib/user_ip_icmp.h @@ -10,7 +10,7 @@ * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. - * 4. Neither the name of the University nor the names of its contributors + * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* @@ -40,8 +40,8 @@ * Internal of an ICMP Router Advertisement */ struct icmp_ra_addr { - u_int32_t ira_addr; - u_int32_t ira_preference; + uint32_t ira_addr; + uint32_t ira_preference; }; /* @@ -53,16 +53,16 @@ struct icmphdr { u_short icmp_cksum; /* ones complement cksum of struct */ }; -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) #pragma pack (push, 1) struct icmp6_hdr { - u_int8_t icmp6_type; - u_int8_t icmp6_code; - u_int16_t icmp6_cksum; + uint8_t icmp6_type; + uint8_t icmp6_code; + uint16_t icmp6_cksum; union { - u_int32_t icmp6_un_data32[1]; - u_int16_t icmp6_un_data16[2]; - u_int8_t icmp6_un_data8[4]; + uint32_t icmp6_un_data32[1]; + uint16_t icmp6_un_data16[2]; + uint8_t icmp6_un_data8[4]; } icmp6_dataun; }; #pragma pack(pop) @@ -98,7 +98,7 @@ struct icmp { struct ih_rtradv { u_char irt_num_addrs; u_char irt_wpa; - u_int16_t irt_lifetime; + uint16_t irt_lifetime; } ih_rtradv; } icmp_hun; #define icmp_pptr icmp_hun.ih_pptr @@ -126,7 +126,7 @@ struct icmp { /* options and then 64 bits of data */ } id_ip; struct icmp_ra_addr id_radv; - u_int32_t id_mask; + uint32_t id_mask; char id_data[1]; } icmp_dun; #define icmp_otime icmp_dun.id_ts.its_otime diff --git a/TMessagesProj/jni/third_party/usrsctplib/user_malloc.h b/TMessagesProj/jni/third_party/usrsctplib/user_malloc.h index 8d4fe8260..c588e094a 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/user_malloc.h +++ b/TMessagesProj/jni/third_party/usrsctplib/user_malloc.h @@ -12,7 +12,7 @@ * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. - * 4. Neither the name of the University nor the names of its contributors + * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * @@ -37,7 +37,7 @@ /*__Userspace__*/ #include #include -#if !defined (__Userspace_os_Windows) +#if !defined(_WIN32) #include #include #else @@ -200,67 +200,4 @@ Start copy: Copied code for __Userspace__ */ } \ } while (0); - -/* End copy: Copied code for __Userspace__ */ - -#if 0 -#ifdef _KERNEL -#define MALLOC_DEFINE(type, shortdesc, longdesc) \ - struct malloc_type type[1] = { \ - { NULL, 0, 0, 0, 0, 0, M_MAGIC, shortdesc, NULL, NULL, \ - NULL, 0, NULL, NULL, 0, 0 } \ - }; \ - SYSINIT(type##_init, SI_SUB_KMEM, SI_ORDER_SECOND, malloc_init, \ - type); \ - SYSUNINIT(type##_uninit, SI_SUB_KMEM, SI_ORDER_ANY, \ - malloc_uninit, type) - - -#define MALLOC_DECLARE(type) \ - extern struct malloc_type type[1] - -MALLOC_DECLARE(M_CACHE); -MALLOC_DECLARE(M_DEVBUF); -MALLOC_DECLARE(M_TEMP); - -MALLOC_DECLARE(M_IP6OPT); /* for INET6 */ -MALLOC_DECLARE(M_IP6NDP); /* for INET6 */ - -/* - * Deprecated macro versions of not-quite-malloc() and free(). - */ -#define MALLOC(space, cast, size, type, flags) \ - ((space) = (cast)malloc((u_long)(size), (type), (flags))) -#define FREE(addr, type) free((addr), (type)) - -/* - * XXX this should be declared in , but that tends to fail - * because is included in a header before the source file - * has a chance to include to get MALLOC_DECLARE() defined. - */ -MALLOC_DECLARE(M_IOV); - -extern struct mtx malloc_mtx; - -/* XXX struct malloc_type is unused for contig*(). 
*/ -void contigfree(void *addr, unsigned long size, struct malloc_type *type); -void *contigmalloc(unsigned long size, struct malloc_type *type, int flags, - vm_paddr_t low, vm_paddr_t high, unsigned long alignment, - unsigned long boundary); -void free(void *addr, struct malloc_type *type); -void *malloc(unsigned long size, struct malloc_type *type, int flags); -void malloc_init(void *); -int malloc_last_fail(void); -void malloc_type_allocated(struct malloc_type *type, unsigned long size); -void malloc_type_freed(struct malloc_type *type, unsigned long size); -void malloc_uninit(void *); -void *realloc(void *addr, unsigned long size, struct malloc_type *type, - int flags); -void *reallocf(void *addr, unsigned long size, struct malloc_type *type, - int flags); - - -#endif /* _KERNEL */ -#endif - #endif /* !_SYS_MALLOC_H_ */ diff --git a/TMessagesProj/jni/third_party/usrsctplib/user_mbuf.c b/TMessagesProj/jni/third_party/usrsctplib/user_mbuf.c index a5037249d..f95185f06 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/user_mbuf.c +++ b/TMessagesProj/jni/third_party/usrsctplib/user_mbuf.c @@ -46,7 +46,6 @@ #include "user_atomic.h" #include "netinet/sctp_pcb.h" -struct mbstat mbstat; #define KIPC_MAX_LINKHDR 4 /* int: max length of link header (see sys/sysclt.h) */ #define KIPC_MAX_PROTOHDR 5 /* int: max length of network header (see sys/sysclt.h)*/ int max_linkhdr = KIPC_MAX_LINKHDR; @@ -81,8 +80,6 @@ static void mb_dtor_clust(void *, void *); static int mbuf_constructor_dup(struct mbuf *m, int pkthdr, short type) { int flags = pkthdr; - if (type == MT_NOINIT) - return (0); m->m_next = NULL; m->m_nextpkt = NULL; @@ -272,9 +269,9 @@ m_clget(struct mbuf *m, int how) mclust_ret = SCTP_ZONE_GET(zone_clust, char); #endif /*mclust_ret = umem_cache_alloc(zone_clust, UMEM_DEFAULT);*/ - if (NULL == mclust_ret) { - SCTPDBG(SCTP_DEBUG_USR, "Memory allocation failure in %s\n", __func__); - } + /* if (NULL == mclust_ret) { */ + SCTPDBG(SCTP_DEBUG_USR, "Memory allocation failure in %s\n", __func__); + /* } */ } #if USING_MBUF_CONSTRUCTOR @@ -290,7 +287,7 @@ struct mbuf * m_getm2(struct mbuf *m, int len, int how, short type, int flags, int allonebuf) { struct mbuf *mb, *nm = NULL, *mtail = NULL; - int size = 0, mbuf_threshold, space_needed = len; + int size, mbuf_threshold, space_needed = len; KASSERT(len >= 0, ("%s: len is < 0", __func__)); @@ -473,7 +470,7 @@ m_tag_free(struct m_tag *t) * XXX probably should be called m_tag_init, but that was already taken. */ static __inline void -m_tag_setup(struct m_tag *t, u_int32_t cookie, int type, int len) +m_tag_setup(struct m_tag *t, uint32_t cookie, int type, int len) { t->m_tag_id = type; @@ -507,7 +504,7 @@ mbuf_initialize(void *dummy) #else zone_mbuf = umem_cache_create(MBUF_MEM_NAME, MSIZE, 0, mb_ctor_mbuf, mb_dtor_mbuf, NULL, - NUULL, + NULL, NULL, 0); #endif /*zone_ext_refcnt = umem_cache_create(MBUF_EXTREFCNT_MEM_NAME, sizeof(u_int), 0, @@ -536,26 +533,6 @@ mbuf_initialize(void *dummy) * */ - - /* - * [Re]set counters and local statistics knobs. 
- * - */ - - mbstat.m_mbufs = 0; - mbstat.m_mclusts = 0; - mbstat.m_drain = 0; - mbstat.m_msize = MSIZE; - mbstat.m_mclbytes = MCLBYTES; - mbstat.m_minclsize = MINCLSIZE; - mbstat.m_mlen = MLEN; - mbstat.m_mhlen = MHLEN; - mbstat.m_numtypes = MT_NTYPES; - - mbstat.m_mcfail = mbstat.m_mpfail = 0; - mbstat.sf_iocnt = 0; - mbstat.sf_allocwait = mbstat.sf_allocfail = 0; - } @@ -598,13 +575,6 @@ mb_ctor_mbuf(void *mem, void *arg, int flgs) flags = args->flags; type = args->type; - /* - * The mbuf is initialized later. - * - */ - if (type == MT_NOINIT) - return (0); - m->m_next = NULL; m->m_nextpkt = NULL; m->m_len = 0; @@ -909,7 +879,6 @@ m_pullup(struct mbuf *n, int len) return (m); bad: m_freem(n); - mbstat.m_mpfail++; /* XXX: No consistency. */ return (NULL); } @@ -1045,16 +1014,14 @@ m_pulldown(struct mbuf *m, int off, int len, int *offp) * easy cases first. * we need to use m_copydata() to get data from m_next, 0>. */ - if ((off == 0 || offp) && M_TRAILINGSPACE(n) >= tlen - && writable) { + if ((off == 0 || offp) && (M_TRAILINGSPACE(n) >= tlen) && writable) { m_copydata(n->m_next, 0, tlen, mtod(n, caddr_t) + n->m_len); n->m_len += tlen; m_adj(n->m_next, tlen); goto ok; } - if ((off == 0 || offp) && M_LEADINGSPACE(n->m_next) >= hlen - && writable) { + if ((off == 0 || offp) && (M_LEADINGSPACE(n->m_next) >= hlen) && writable) { n->m_next->m_data -= hlen; n->m_next->m_len += hlen; memcpy( mtod(n->m_next, caddr_t), mtod(n, caddr_t) + off,hlen); @@ -1198,13 +1165,10 @@ m_copym(struct mbuf *m, int off0, int len, int wait) m = m->m_next; np = &n->m_next; } - if (top == NULL) - mbstat.m_mcfail++; /* XXX: No consistency. */ return (top); nospace: m_freem(top); - mbstat.m_mcfail++; /* XXX: No consistency. */ return (NULL); } @@ -1266,7 +1230,7 @@ m_tag_copy(struct m_tag *t, int how) /* Get a packet tag structure along with specified data following. */ struct m_tag * -m_tag_alloc(u_int32_t cookie, int type, int len, int wait) +m_tag_alloc(uint32_t cookie, int type, int len, int wait) { struct m_tag *t; diff --git a/TMessagesProj/jni/third_party/usrsctplib/user_mbuf.h b/TMessagesProj/jni/third_party/usrsctplib/user_mbuf.h index 9c5d0d7ef..dc1725068 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/user_mbuf.h +++ b/TMessagesProj/jni/third_party/usrsctplib/user_mbuf.h @@ -110,7 +110,7 @@ void m_cat(struct mbuf *m, struct mbuf *n); void m_adj(struct mbuf *, int); void mb_free_ext(struct mbuf *); void m_freem(struct mbuf *); -struct m_tag *m_tag_alloc(u_int32_t, int, int, int); +struct m_tag *m_tag_alloc(uint32_t, int, int, int); struct mbuf *m_copym(struct mbuf *, int, int, int); void m_copyback(struct mbuf *, int, int, caddr_t); struct mbuf *m_pullup(struct mbuf *, int); @@ -125,38 +125,6 @@ void m_copydata(const struct mbuf *, int, int, caddr_t); #define MBUF_CLUSTER_MEM_NAME "mbuf_cluster" #define MBUF_EXTREFCNT_MEM_NAME "mbuf_ext_refcnt" -#define MT_NOINIT 255 /* Not a type but a flag to allocate - a non-initialized mbuf */ - -/* - * General mbuf allocator statistics structure. - * __Userspace__ mbstat may be useful for gathering statistics. - * In the kernel many of these statistics are no longer used as - * they track allocator statistics through kernel UMA's built in statistics mechanism. 
- */ -struct mbstat { - u_long m_mbufs; /* XXX */ - u_long m_mclusts; /* XXX */ - - u_long m_drain; /* times drained protocols for space */ - u_long m_mcfail; /* XXX: times m_copym failed */ - u_long m_mpfail; /* XXX: times m_pullup failed */ - u_long m_msize; /* length of an mbuf */ - u_long m_mclbytes; /* length of an mbuf cluster */ - u_long m_minclsize; /* min length of data to allocate a cluster */ - u_long m_mlen; /* length of data in an mbuf */ - u_long m_mhlen; /* length of data in a header mbuf */ - - /* Number of mbtypes (gives # elems in mbtypes[] array: */ - short m_numtypes; - - /* XXX: Sendfile stats should eventually move to their own struct */ - u_long sf_iocnt; /* times sendfile had to do disk I/O */ - u_long sf_allocfail; /* times sfbuf allocation failed */ - u_long sf_allocwait; /* times sfbuf allocation had to wait */ -}; - - /* * Mbufs are of a single size, MSIZE (sys/param.h), which includes overhead. * An mbuf may add a single "mbuf cluster" of size MCLBYTES (also in @@ -190,9 +158,9 @@ struct m_hdr { */ struct m_tag { SLIST_ENTRY(m_tag) m_tag_link; /* List of packet tags */ - u_int16_t m_tag_id; /* Tag ID */ - u_int16_t m_tag_len; /* Length of data */ - u_int32_t m_tag_cookie; /* ABI/Module ID */ + uint16_t m_tag_id; /* Tag ID */ + uint16_t m_tag_len; /* Length of data */ + uint32_t m_tag_cookie; /* ABI/Module ID */ void (*m_tag_free)(struct m_tag *); }; @@ -207,8 +175,8 @@ struct pkthdr { /* variables for hardware checksum */ int csum_flags; /* flags regarding checksum */ int csum_data; /* data field used by csum routines */ - u_int16_t tso_segsz; /* TSO segment size */ - u_int16_t ether_vtag; /* Ethernet 802.1p+q vlan tag */ + uint16_t tso_segsz; /* TSO segment size */ + uint16_t ether_vtag; /* Ethernet 802.1p+q vlan tag */ SLIST_HEAD(packet_tags, m_tag) tags; /* list of packet tags */ }; @@ -322,9 +290,6 @@ struct mbuf { #define MT_OOBDATA 15 /* expedited data */ #define MT_NTYPES 16 /* number of mbuf types for mbtypes[] */ -#define MT_NOINIT 255 /* Not a type but a flag to allocate - a non-initialized mbuf */ - /* * __Userspace__ flags like M_NOWAIT are defined in malloc.h * Flags like these are used in functions like uma_zalloc() @@ -352,9 +317,6 @@ void m_tag_free_default(struct m_tag *); extern int max_linkhdr; /* Largest link-level header */ extern int max_protohdr; /* Size of largest protocol layer header. See user_mbuf.c */ -extern struct mbstat mbstat; /* General mbuf stats/infos */ - - /* * Evaluate TRUE if it's safe to write to the mbuf m's data region (this can * be both the local data payload, or an external buffer area, depending on @@ -373,9 +335,9 @@ extern struct mbstat mbstat; /* General mbuf stats/infos */ * of checking writability of the mbuf data area rests solely with the caller. */ #define M_LEADINGSPACE(m) \ - ((m)->m_flags & M_EXT ? \ + (((m)->m_flags & M_EXT) ? \ (M_WRITABLE(m) ? (m)->m_data - (m)->m_ext.ext_buf : 0): \ - (m)->m_flags & M_PKTHDR ? (m)->m_data - (m)->m_pktdat : \ + ((m)->m_flags & M_PKTHDR)? (m)->m_data - (m)->m_pktdat : \ (m)->m_data - (m)->m_dat) /* @@ -385,7 +347,7 @@ extern struct mbstat mbstat; /* General mbuf stats/infos */ * of checking writability of the mbuf data area rests solely with the caller. */ #define M_TRAILINGSPACE(m) \ - ((m)->m_flags & M_EXT ? \ + (((m)->m_flags & M_EXT) ? \ (M_WRITABLE(m) ? 
(m)->m_ext.ext_buf + (m)->m_ext.ext_size \ - ((m)->m_data + (m)->m_len) : 0) : \ &(m)->m_dat[MLEN] - ((m)->m_data + (m)->m_len)) @@ -437,4 +399,9 @@ extern struct mbstat mbstat; /* General mbuf stats/infos */ (m)->m_data += (MHLEN - (len)) & ~(sizeof(long) - 1); \ } while (0) +#define M_SIZE(m) \ + (((m)->m_flags & M_EXT) ? (m)->m_ext.ext_size : \ + ((m)->m_flags & M_PKTHDR) ? MHLEN : \ + MLEN) + #endif diff --git a/TMessagesProj/jni/third_party/usrsctplib/user_queue.h b/TMessagesProj/jni/third_party/usrsctplib/user_queue.h index 44f899490..fcd368bdd 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/user_queue.h +++ b/TMessagesProj/jni/third_party/usrsctplib/user_queue.h @@ -10,7 +10,7 @@ * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. - * 4. Neither the name of the University nor the names of its contributors + * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * @@ -144,8 +144,8 @@ struct name { \ #define SLIST_HEAD_INITIALIZER(head) \ { NULL } -#if defined (__Userspace_os_Windows) -#if defined (SLIST_ENTRY) +#if defined(_WIN32) +#if defined(SLIST_ENTRY) #undef SLIST_ENTRY #endif #endif diff --git a/TMessagesProj/jni/third_party/usrsctplib/user_recv_thread.c b/TMessagesProj/jni/third_party/usrsctplib/user_recv_thread.c index ae5e517da..66be13d74 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/user_recv_thread.c +++ b/TMessagesProj/jni/third_party/usrsctplib/user_recv_thread.c @@ -30,12 +30,12 @@ #if defined(INET) || defined(INET6) #include -#if !defined(__Userspace_os_Windows) +#if !defined(_WIN32) #include #include #include #include -#if !defined(__Userspace_os_DragonFly) && !defined(__Userspace_os_FreeBSD) && !defined(__Userspace_os_NetBSD) +#if !defined(__DragonFly__) && !defined(__FreeBSD__) && !defined(__NetBSD__) #include #else #include @@ -46,7 +46,7 @@ #include #include #if 0 -#if defined(__Userspace_os_Linux) +#if defined(__linux__) #include #ifdef HAVE_LINUX_IF_ADDR_H #include @@ -56,23 +56,32 @@ #endif #endif #endif -#if defined(__Userspace_os_Darwin) || defined(__Userspace_os_DragonFly) || defined(__Userspace_os_FreeBSD) -#include +#if defined(HAVE_NET_ROUTE_H) +# include +#elif defined(__APPLE__) +/* Apple SDKs for iOS, tvOS, watchOS, etc. don't ship this header */ +# define RTM_NEWADDR 0xc +# define RTM_DELADDR 0xd +# define RTAX_IFA 5 +# define RTAX_MAX 8 #endif /* local macros and datatypes used to get IP addresses system independently */ -#if !defined(IP_PKTINFO ) && ! defined(IP_RECVDSTADDR) +#if !defined(IP_PKTINFO) && !defined(IP_RECVDSTADDR) # error "Can't determine socket option to use to get UDP IP" #endif void recv_thread_destroy(void); -#define MAXLEN_MBUF_CHAIN 32 /* What should this value be? */ + +#define MAXLEN_MBUF_CHAIN 128 + #define ROUNDUP(a, size) (((a) & ((size)-1)) ? (1 + ((a) | ((size)-1))) : (a)) -#if defined(__Userspace_os_Darwin) || defined(__Userspace_os_DragonFly) || defined(__Userspace_os_FreeBSD) + +#if defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__) #define NEXT_SA(ap) ap = (struct sockaddr *) \ ((caddr_t) ap + (ap->sa_len ? 
ROUNDUP(ap->sa_len, sizeof (uint32_t)) : sizeof(uint32_t))) #endif -#if defined(__Userspace_os_Darwin) || defined(__Userspace_os_DragonFly) || defined(__Userspace_os_FreeBSD) +#if defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__) static void sctp_get_rtaddrs(int addrs, struct sockaddr *sa, struct sockaddr **rti_info) { @@ -270,7 +279,8 @@ recv_function_raw(void *arg) int compute_crc = 1; struct sctp_chunkhdr *ch; struct sockaddr_in src, dst; -#if !defined(__Userspace_os_Windows) +#if !defined(_WIN32) + ssize_t res; unsigned int ncounter; struct msghdr msg; struct iovec recv_iovec[MAXLEN_MBUF_CHAIN]; @@ -305,7 +315,7 @@ recv_function_raw(void *arg) Have tried both sending and receiving */ recvmbuf[i] = sctp_get_mbuf_for_msg(iovlen, want_header, M_NOWAIT, want_ext, MT_DATA); -#if !defined(__Userspace_os_Windows) +#if !defined(_WIN32) recv_iovec[i].iov_base = (caddr_t)recvmbuf[i]->m_data; recv_iovec[i].iov_len = iovlen; #else @@ -314,7 +324,7 @@ recv_function_raw(void *arg) #endif } to_fill = 0; -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) flags = 0; ncounter = 0; fromlen = sizeof(struct sockaddr_in); @@ -337,14 +347,16 @@ recv_function_raw(void *arg) msg.msg_iovlen = MAXLEN_MBUF_CHAIN; msg.msg_control = NULL; msg.msg_controllen = 0; - ncounter = n = recvmsg(SCTP_BASE_VAR(userspace_rawsctp), &msg, 0); - if (n < 0) { + res = recvmsg(SCTP_BASE_VAR(userspace_rawsctp), &msg, 0); + if (res < 0) { if (errno == EAGAIN || errno == EINTR) { continue; } else { break; } } + ncounter = (unsigned int)res; + n = (int)res; #endif SCTP_HEADER_LEN(recvmbuf[0]) = n; /* length of total packet */ SCTP_STAT_INCR(sctps_recvpackets); @@ -368,13 +380,20 @@ recv_function_raw(void *arg) } while (ncounter > 0); } + offset = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); + if (SCTP_BUF_LEN(recvmbuf[0]) < offset) { + if ((recvmbuf[0] = m_pullup(recvmbuf[0], offset)) == NULL) { + SCTP_STAT_INCR(sctps_hdrops); + continue; + } + } iphdr = mtod(recvmbuf[0], struct ip *); sh = (struct sctphdr *)((caddr_t)iphdr + sizeof(struct ip)); ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(struct sctphdr)); - offset = sizeof(struct ip) + sizeof(struct sctphdr); + offset -= sizeof(struct sctp_chunkhdr); if (iphdr->ip_tos != 0) { - ecn = iphdr->ip_tos & 0x02; + ecn = iphdr->ip_tos & 0x03; } dst.sin_family = AF_INET; @@ -430,7 +449,7 @@ recv_function_raw(void *arg) } /* free the array itself */ free(recvmbuf); - SCTPDBG(SCTP_DEBUG_USR, "%s: Exiting SCTP/IP4 rcv", __func__); + SCTPDBG(SCTP_DEBUG_USR, "%s: Exiting SCTP/IP4 rcv\n", __func__); return (NULL); } #endif @@ -440,8 +459,9 @@ static void * recv_function_raw6(void *arg) { struct mbuf **recvmbuf6; -#if !defined(__Userspace_os_Windows) - unsigned int ncounter = 0; +#if !defined(_WIN32) + ssize_t res; + unsigned int ncounter; struct iovec recv_iovec[MAXLEN_MBUF_CHAIN]; struct msghdr msg; struct cmsghdr *cmsgptr; @@ -482,7 +502,7 @@ recv_function_raw6(void *arg) Have tried both sending and receiving */ recvmbuf6[i] = sctp_get_mbuf_for_msg(iovlen, want_header, M_NOWAIT, want_ext, MT_DATA); -#if !defined(__Userspace_os_Windows) +#if !defined(_WIN32) recv_iovec[i].iov_base = (caddr_t)recvmbuf6[i]->m_data; recv_iovec[i].iov_len = iovlen; #else @@ -491,7 +511,7 @@ recv_function_raw6(void *arg) #endif } to_fill = 0; -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) ncounter = 0; memset(&from, 0, sizeof(struct sockaddr_in6)); nResult = WSAIoctl(SCTP_BASE_VAR(userspace_rawsctp6), SIO_GET_EXTENSION_FUNCTION_POINTER, @@ -528,15 
+548,16 @@ recv_function_raw6(void *arg) msg.msg_control = (void *)cmsgbuf; msg.msg_controllen = (socklen_t)CMSG_SPACE(sizeof (struct in6_pktinfo)); msg.msg_flags = 0; - - ncounter = n = recvmsg(SCTP_BASE_VAR(userspace_rawsctp6), &msg, 0); - if (n < 0) { + res = recvmsg(SCTP_BASE_VAR(userspace_rawsctp6), &msg, 0); + if (res < 0) { if (errno == EAGAIN || errno == EINTR) { continue; } else { break; } } + ncounter = (unsigned int)res; + n = (int)res; #endif SCTP_HEADER_LEN(recvmbuf6[0]) = n; /* length of total packet */ SCTP_STAT_INCR(sctps_recvpackets); @@ -576,9 +597,16 @@ recv_function_raw6(void *arg) continue; } + offset = sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); + if (SCTP_BUF_LEN(recvmbuf6[0]) < offset) { + if ((recvmbuf6[0] = m_pullup(recvmbuf6[0], offset)) == NULL) { + SCTP_STAT_INCR(sctps_hdrops); + continue; + } + } sh = mtod(recvmbuf6[0], struct sctphdr *); ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(struct sctphdr)); - offset = sizeof(struct sctphdr); + offset -= sizeof(struct sctp_chunkhdr); dst.sin6_family = AF_INET6; #ifdef HAVE_SIN6_LEN @@ -591,7 +619,8 @@ recv_function_raw6(void *arg) src.sin6_len = sizeof(struct sockaddr_in6); #endif src.sin6_port = sh->src_port; - if (memcmp(&src.sin6_addr, &dst.sin6_addr, sizeof(struct in6_addr)) == 0) { + if (SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) && + (memcmp(&src.sin6_addr, &dst.sin6_addr, sizeof(struct in6_addr)) == 0)) { compute_crc = 0; SCTP_STAT_INCR(sctps_recvhwcrc); } else { @@ -615,7 +644,7 @@ recv_function_raw6(void *arg) } /* free the array itself */ free(recvmbuf6); - SCTPDBG(SCTP_DEBUG_USR, "%s: Exiting SCTP/IP6 rcv", __func__); + SCTPDBG(SCTP_DEBUG_USR, "%s: Exiting SCTP/IP6 rcv\n", __func__); return (NULL); } #endif @@ -643,7 +672,8 @@ recv_function_udp(void *arg) char cmsgbuf[CMSG_SPACE(sizeof(struct in_addr))]; #endif int compute_crc = 1; -#if !defined(__Userspace_os_Windows) +#if !defined(_WIN32) + ssize_t res; unsigned int ncounter; struct iovec iov[MAXLEN_MBUF_CHAIN]; struct msghdr msg; @@ -670,7 +700,7 @@ recv_function_udp(void *arg) Have tried both sending and receiving */ udprecvmbuf[i] = sctp_get_mbuf_for_msg(iovlen, want_header, M_NOWAIT, want_ext, MT_DATA); -#if !defined(__Userspace_os_Windows) +#if !defined(_WIN32) iov[i].iov_base = (caddr_t)udprecvmbuf[i]->m_data; iov[i].iov_len = iovlen; #else @@ -679,7 +709,7 @@ recv_function_udp(void *arg) #endif } to_fill = 0; -#if !defined(__Userspace_os_Windows) +#if !defined(_WIN32) memset(&msg, 0, sizeof(struct msghdr)); #else memset(&msg, 0, sizeof(WSAMSG)); @@ -688,7 +718,7 @@ recv_function_udp(void *arg) memset(&dst, 0, sizeof(struct sockaddr_in)); memset(cmsgbuf, 0, sizeof(cmsgbuf)); -#if !defined(__Userspace_os_Windows) +#if !defined(_WIN32) msg.msg_name = (void *)&src; msg.msg_namelen = sizeof(struct sockaddr_in); msg.msg_iov = iov; @@ -697,14 +727,16 @@ recv_function_udp(void *arg) msg.msg_controllen = sizeof(cmsgbuf); msg.msg_flags = 0; - ncounter = n = recvmsg(SCTP_BASE_VAR(userspace_udpsctp), &msg, 0); - if (n < 0) { + res = recvmsg(SCTP_BASE_VAR(userspace_udpsctp), &msg, 0); + if (res < 0) { if (errno == EAGAIN || errno == EINTR) { continue; } else { break; } } + ncounter = (unsigned int)res; + n = (int)res; #else nResult = WSAIoctl(SCTP_BASE_VAR(userspace_udpsctp), SIO_GET_EXTENSION_FUNCTION_POINTER, &WSARecvMsg_GUID, sizeof WSARecvMsg_GUID, @@ -789,14 +821,22 @@ recv_function_udp(void *arg) continue; } - /*offset = sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);*/ + offset = sizeof(struct sctphdr) + sizeof(struct 
sctp_chunkhdr); + if (SCTP_BUF_LEN(udprecvmbuf[0]) < offset) { + if ((udprecvmbuf[0] = m_pullup(udprecvmbuf[0], offset)) == NULL) { + SCTP_STAT_INCR(sctps_hdrops); + continue; + } + } sh = mtod(udprecvmbuf[0], struct sctphdr *); ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(struct sctphdr)); - offset = sizeof(struct sctphdr); + offset -= sizeof(struct sctp_chunkhdr); + port = src.sin_port; src.sin_port = sh->src_port; dst.sin_port = sh->dest_port; - if (src.sin_addr.s_addr == dst.sin_addr.s_addr) { + if (SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) && + (src.sin_addr.s_addr == dst.sin_addr.s_addr)) { compute_crc = 0; SCTP_STAT_INCR(sctps_recvhwcrc); } else { @@ -820,7 +860,7 @@ recv_function_udp(void *arg) } /* free the array itself */ free(udprecvmbuf); - SCTPDBG(SCTP_DEBUG_USR, "%s: Exiting SCTP/UDP/IP4 rcv", __func__); + SCTPDBG(SCTP_DEBUG_USR, "%s: Exiting SCTP/UDP/IP4 rcv\n", __func__); return (NULL); } #endif @@ -844,10 +884,11 @@ recv_function_udp6(void *arg) struct sctp_chunkhdr *ch; char cmsgbuf[CMSG_SPACE(sizeof (struct in6_pktinfo))]; int compute_crc = 1; -#if !defined(__Userspace_os_Windows) +#if !defined(_WIN32) struct iovec iov[MAXLEN_MBUF_CHAIN]; struct msghdr msg; struct cmsghdr *cmsgptr; + ssize_t res; unsigned int ncounter; #else GUID WSARecvMsg_GUID = WSAID_WSARECVMSG; @@ -870,7 +911,7 @@ recv_function_udp6(void *arg) Have tried both sending and receiving */ udprecvmbuf6[i] = sctp_get_mbuf_for_msg(iovlen, want_header, M_NOWAIT, want_ext, MT_DATA); -#if !defined(__Userspace_os_Windows) +#if !defined(_WIN32) iov[i].iov_base = (caddr_t)udprecvmbuf6[i]->m_data; iov[i].iov_len = iovlen; #else @@ -880,7 +921,7 @@ recv_function_udp6(void *arg) } to_fill = 0; -#if !defined(__Userspace_os_Windows) +#if !defined(_WIN32) memset(&msg, 0, sizeof(struct msghdr)); #else memset(&msg, 0, sizeof(WSAMSG)); @@ -889,7 +930,7 @@ recv_function_udp6(void *arg) memset(&dst, 0, sizeof(struct sockaddr_in6)); memset(cmsgbuf, 0, CMSG_SPACE(sizeof (struct in6_pktinfo))); -#if !defined(__Userspace_os_Windows) +#if !defined(_WIN32) msg.msg_name = (void *)&src; msg.msg_namelen = sizeof(struct sockaddr_in6); msg.msg_iov = iov; @@ -898,14 +939,16 @@ recv_function_udp6(void *arg) msg.msg_controllen = (socklen_t)CMSG_SPACE(sizeof (struct in6_pktinfo)); msg.msg_flags = 0; - ncounter = n = recvmsg(SCTP_BASE_VAR(userspace_udpsctp6), &msg, 0); - if (n < 0) { + res = recvmsg(SCTP_BASE_VAR(userspace_udpsctp6), &msg, 0); + if (res < 0) { if (errno == EAGAIN || errno == EINTR) { continue; } else { break; } } + ncounter = (unsigned int)res; + n = (int)res; #else nResult = WSAIoctl(SCTP_BASE_VAR(userspace_udpsctp6), SIO_GET_EXTENSION_FUNCTION_POINTER, &WSARecvMsg_GUID, sizeof WSARecvMsg_GUID, @@ -976,14 +1019,22 @@ recv_function_udp6(void *arg) continue; } + offset = sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); + if (SCTP_BUF_LEN(udprecvmbuf6[0]) < offset) { + if ((udprecvmbuf6[0] = m_pullup(udprecvmbuf6[0], offset)) == NULL) { + SCTP_STAT_INCR(sctps_hdrops); + continue; + } + } sh = mtod(udprecvmbuf6[0], struct sctphdr *); ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(struct sctphdr)); - offset = sizeof(struct sctphdr); + offset -= sizeof(struct sctp_chunkhdr); port = src.sin6_port; src.sin6_port = sh->src_port; dst.sin6_port = sh->dest_port; - if ((memcmp(&src.sin6_addr, &dst.sin6_addr, sizeof(struct in6_addr)) == 0)) { + if (SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) && + (memcmp(&src.sin6_addr, &dst.sin6_addr, sizeof(struct in6_addr)) == 0)) { compute_crc = 0; SCTP_STAT_INCR(sctps_recvhwcrc); } 
else { @@ -1007,12 +1058,12 @@ recv_function_udp6(void *arg) } /* free the array itself */ free(udprecvmbuf6); - SCTPDBG(SCTP_DEBUG_USR, "%s: Exiting SCTP/UDP/IP6 rcv", __func__); + SCTPDBG(SCTP_DEBUG_USR, "%s: Exiting SCTP/UDP/IP6 rcv\n", __func__); return (NULL); } #endif -#if defined (__Userspace_os_Windows) +#if defined(_WIN32) static void setReceiveBufferSize(SOCKET sfd, int new_size) #else @@ -1023,7 +1074,7 @@ setReceiveBufferSize(int sfd, int new_size) int ch = new_size; if (setsockopt (sfd, SOL_SOCKET, SO_RCVBUF, (void*)&ch, sizeof(ch)) < 0) { -#if defined (__Userspace_os_Windows) +#if defined(_WIN32) SCTPDBG(SCTP_DEBUG_USR, "Can't set recv-buffers size (errno = %d).\n", WSAGetLastError()); #else SCTPDBG(SCTP_DEBUG_USR, "Can't set recv-buffers size (errno = %d).\n", errno); @@ -1032,7 +1083,7 @@ setReceiveBufferSize(int sfd, int new_size) return; } -#if defined (__Userspace_os_Windows) +#if defined(_WIN32) static void setSendBufferSize(SOCKET sfd, int new_size) #else @@ -1043,7 +1094,7 @@ setSendBufferSize(int sfd, int new_size) int ch = new_size; if (setsockopt (sfd, SOL_SOCKET, SO_SNDBUF, (void*)&ch, sizeof(ch)) < 0) { -#if defined (__Userspace_os_Windows) +#if defined(_WIN32) SCTPDBG(SCTP_DEBUG_USR, "Can't set send-buffers size (errno = %d).\n", WSAGetLastError()); #else SCTPDBG(SCTP_DEBUG_USR, "Can't set send-buffers size (errno = %d).\n", errno); @@ -1066,7 +1117,7 @@ recv_thread_init(void) #if defined(INET) || defined(INET6) const int on = 1; #endif -#if !defined(__Userspace_os_Windows) +#if !defined(_WIN32) struct timeval timeout; memset(&timeout, 0, sizeof(struct timeval)); @@ -1075,7 +1126,7 @@ recv_thread_init(void) #else unsigned int timeout = SOCKET_TIMEOUT; /* Timeout in milliseconds */ #endif -#if defined(__Userspace_os_Darwin) || defined(__Userspace_os_DragonFly) || defined(__Userspace_os_FreeBSD) +#if defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__) if (SCTP_BASE_VAR(userspace_route) == -1) { if ((SCTP_BASE_VAR(userspace_route) = socket(AF_ROUTE, SOCK_RAW, 0)) == -1) { SCTPDBG(SCTP_DEBUG_USR, "Can't create routing socket (errno = %d).\n", errno); @@ -1104,7 +1155,7 @@ recv_thread_init(void) if (SCTP_BASE_VAR(userspace_route) != -1) { if (setsockopt(SCTP_BASE_VAR(userspace_route), SOL_SOCKET, SO_RCVTIMEO,(const void*)&timeout, sizeof(struct timeval)) < 0) { SCTPDBG(SCTP_DEBUG_USR, "Can't set timeout on routing socket (errno = %d).\n", errno); -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) closesocket(SCTP_BASE_VAR(userspace_route)); #else close(SCTP_BASE_VAR(userspace_route)); @@ -1117,7 +1168,7 @@ recv_thread_init(void) #if defined(INET) if (SCTP_BASE_VAR(userspace_rawsctp) == -1) { if ((SCTP_BASE_VAR(userspace_rawsctp) = socket(AF_INET, SOCK_RAW, IPPROTO_SCTP)) == -1) { -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) SCTPDBG(SCTP_DEBUG_USR, "Can't create raw socket for IPv4 (errno = %d).\n", WSAGetLastError()); #else SCTPDBG(SCTP_DEBUG_USR, "Can't create raw socket for IPv4 (errno = %d).\n", errno); @@ -1125,7 +1176,7 @@ recv_thread_init(void) } else { /* complete setting up the raw SCTP socket */ if (setsockopt(SCTP_BASE_VAR(userspace_rawsctp), IPPROTO_IP, IP_HDRINCL,(const void*)&hdrincl, sizeof(int)) < 0) { -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) SCTPDBG(SCTP_DEBUG_USR, "Can't set IP_HDRINCL (errno = %d).\n", WSAGetLastError()); closesocket(SCTP_BASE_VAR(userspace_rawsctp)); #else @@ -1134,7 +1185,7 @@ recv_thread_init(void) #endif SCTP_BASE_VAR(userspace_rawsctp) = -1; } else if 
(setsockopt(SCTP_BASE_VAR(userspace_rawsctp), SOL_SOCKET, SO_RCVTIMEO, (const void *)&timeout, sizeof(timeout)) < 0) { -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) SCTPDBG(SCTP_DEBUG_USR, "Can't set timeout on socket for SCTP/IPv4 (errno = %d).\n", WSAGetLastError()); closesocket(SCTP_BASE_VAR(userspace_rawsctp)); #else @@ -1151,7 +1202,7 @@ recv_thread_init(void) addr_ipv4.sin_port = htons(0); addr_ipv4.sin_addr.s_addr = htonl(INADDR_ANY); if (bind(SCTP_BASE_VAR(userspace_rawsctp), (const struct sockaddr *)&addr_ipv4, sizeof(struct sockaddr_in)) < 0) { -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) SCTPDBG(SCTP_DEBUG_USR, "Can't bind socket for SCTP/IPv4 (errno = %d).\n", WSAGetLastError()); closesocket(SCTP_BASE_VAR(userspace_rawsctp)); #else @@ -1166,9 +1217,9 @@ recv_thread_init(void) } } } - if (SCTP_BASE_VAR(userspace_udpsctp) == -1) { + if ((SCTP_BASE_VAR(userspace_udpsctp) == -1) && (SCTP_BASE_SYSCTL(sctp_udp_tunneling_port) != 0)) { if ((SCTP_BASE_VAR(userspace_udpsctp) = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP)) == -1) { -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) SCTPDBG(SCTP_DEBUG_USR, "Can't create socket for SCTP/UDP/IPv4 (errno = %d).\n", WSAGetLastError()); #else SCTPDBG(SCTP_DEBUG_USR, "Can't create socket for SCTP/UDP/IPv4 (errno = %d).\n", errno); @@ -1179,7 +1230,7 @@ recv_thread_init(void) #else if (setsockopt(SCTP_BASE_VAR(userspace_udpsctp), IPPROTO_IP, IP_RECVDSTADDR, (const void *)&on, (int)sizeof(int)) < 0) { #endif -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) #if defined(IP_PKTINFO) SCTPDBG(SCTP_DEBUG_USR, "Can't set IP_PKTINFO on socket for SCTP/UDP/IPv4 (errno = %d).\n", WSAGetLastError()); #else @@ -1196,7 +1247,7 @@ recv_thread_init(void) #endif SCTP_BASE_VAR(userspace_udpsctp) = -1; } else if (setsockopt(SCTP_BASE_VAR(userspace_udpsctp), SOL_SOCKET, SO_RCVTIMEO, (const void *)&timeout, sizeof(timeout)) < 0) { -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) SCTPDBG(SCTP_DEBUG_USR, "Can't set timeout on socket for SCTP/UDP/IPv4 (errno = %d).\n", WSAGetLastError()); closesocket(SCTP_BASE_VAR(userspace_udpsctp)); #else @@ -1213,7 +1264,7 @@ recv_thread_init(void) addr_ipv4.sin_port = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)); addr_ipv4.sin_addr.s_addr = htonl(INADDR_ANY); if (bind(SCTP_BASE_VAR(userspace_udpsctp), (const struct sockaddr *)&addr_ipv4, sizeof(struct sockaddr_in)) < 0) { -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) SCTPDBG(SCTP_DEBUG_USR, "Can't bind socket for SCTP/UDP/IPv4 (errno = %d).\n", WSAGetLastError()); closesocket(SCTP_BASE_VAR(userspace_udpsctp)); #else @@ -1232,7 +1283,7 @@ recv_thread_init(void) #if defined(INET6) if (SCTP_BASE_VAR(userspace_rawsctp6) == -1) { if ((SCTP_BASE_VAR(userspace_rawsctp6) = socket(AF_INET6, SOCK_RAW, IPPROTO_SCTP)) == -1) { -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) SCTPDBG(SCTP_DEBUG_USR, "Can't create socket for SCTP/IPv6 (errno = %d).\n", WSAGetLastError()); #else SCTPDBG(SCTP_DEBUG_USR, "Can't create socket for SCTP/IPv6 (errno = %d).\n", errno); @@ -1241,7 +1292,7 @@ recv_thread_init(void) /* complete setting up the raw SCTP socket */ #if defined(IPV6_RECVPKTINFO) if (setsockopt(SCTP_BASE_VAR(userspace_rawsctp6), IPPROTO_IPV6, IPV6_RECVPKTINFO, (const void *)&on, sizeof(on)) < 0) { -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) SCTPDBG(SCTP_DEBUG_USR, "Can't set IPV6_RECVPKTINFO on socket for SCTP/IPv6 (errno = %d).\n", WSAGetLastError()); closesocket(SCTP_BASE_VAR(userspace_rawsctp6)); #else @@ 
-1252,7 +1303,7 @@ recv_thread_init(void) } else { #else if (setsockopt(SCTP_BASE_VAR(userspace_rawsctp6), IPPROTO_IPV6, IPV6_PKTINFO,(const void*)&on, sizeof(on)) < 0) { -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) SCTPDBG(SCTP_DEBUG_USR, "Can't set IPV6_PKTINFO on socket for SCTP/IPv6 (errno = %d).\n", WSAGetLastError()); closesocket(SCTP_BASE_VAR(userspace_rawsctp6)); #else @@ -1263,14 +1314,14 @@ recv_thread_init(void) } else { #endif if (setsockopt(SCTP_BASE_VAR(userspace_rawsctp6), IPPROTO_IPV6, IPV6_V6ONLY, (const void*)&on, (socklen_t)sizeof(on)) < 0) { -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) SCTPDBG(SCTP_DEBUG_USR, "Can't set IPV6_V6ONLY on socket for SCTP/IPv6 (errno = %d).\n", WSAGetLastError()); #else SCTPDBG(SCTP_DEBUG_USR, "Can't set IPV6_V6ONLY on socket for SCTP/IPv6 (errno = %d).\n", errno); #endif } if (setsockopt(SCTP_BASE_VAR(userspace_rawsctp6), SOL_SOCKET, SO_RCVTIMEO, (const void *)&timeout, sizeof(timeout)) < 0) { -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) SCTPDBG(SCTP_DEBUG_USR, "Can't set timeout on socket for SCTP/IPv6 (errno = %d).\n", WSAGetLastError()); closesocket(SCTP_BASE_VAR(userspace_rawsctp6)); #else @@ -1287,7 +1338,7 @@ recv_thread_init(void) addr_ipv6.sin6_port = htons(0); addr_ipv6.sin6_addr = in6addr_any; if (bind(SCTP_BASE_VAR(userspace_rawsctp6), (const struct sockaddr *)&addr_ipv6, sizeof(struct sockaddr_in6)) < 0) { -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) SCTPDBG(SCTP_DEBUG_USR, "Can't bind socket for SCTP/IPv6 (errno = %d).\n", WSAGetLastError()); closesocket(SCTP_BASE_VAR(userspace_rawsctp6)); #else @@ -1303,9 +1354,9 @@ recv_thread_init(void) } } } - if (SCTP_BASE_VAR(userspace_udpsctp6) == -1) { + if ((SCTP_BASE_VAR(userspace_udpsctp6) == -1) && (SCTP_BASE_SYSCTL(sctp_udp_tunneling_port) != 0)) { if ((SCTP_BASE_VAR(userspace_udpsctp6) = socket(AF_INET6, SOCK_DGRAM, IPPROTO_UDP)) == -1) { -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) SCTPDBG(SCTP_DEBUG_USR, "Can't create socket for SCTP/UDP/IPv6 (errno = %d).\n", WSAGetLastError()); #else SCTPDBG(SCTP_DEBUG_USR, "Can't create socket for SCTP/UDP/IPv6 (errno = %d).\n", errno); @@ -1313,7 +1364,7 @@ recv_thread_init(void) } #if defined(IPV6_RECVPKTINFO) if (setsockopt(SCTP_BASE_VAR(userspace_udpsctp6), IPPROTO_IPV6, IPV6_RECVPKTINFO, (const void *)&on, (int)sizeof(int)) < 0) { -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) SCTPDBG(SCTP_DEBUG_USR, "Can't set IPV6_RECVPKTINFO on socket for SCTP/UDP/IPv6 (errno = %d).\n", WSAGetLastError()); closesocket(SCTP_BASE_VAR(userspace_udpsctp6)); #else @@ -1324,7 +1375,7 @@ recv_thread_init(void) } else { #else if (setsockopt(SCTP_BASE_VAR(userspace_udpsctp6), IPPROTO_IPV6, IPV6_PKTINFO, (const void *)&on, (int)sizeof(int)) < 0) { -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) SCTPDBG(SCTP_DEBUG_USR, "Can't set IPV6_PKTINFO on socket for SCTP/UDP/IPv6 (errno = %d).\n", WSAGetLastError()); closesocket(SCTP_BASE_VAR(userspace_udpsctp6)); #else @@ -1335,14 +1386,14 @@ recv_thread_init(void) } else { #endif if (setsockopt(SCTP_BASE_VAR(userspace_udpsctp6), IPPROTO_IPV6, IPV6_V6ONLY, (const void *)&on, (socklen_t)sizeof(on)) < 0) { -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) SCTPDBG(SCTP_DEBUG_USR, "Can't set IPV6_V6ONLY on socket for SCTP/UDP/IPv6 (errno = %d).\n", WSAGetLastError()); #else SCTPDBG(SCTP_DEBUG_USR, "Can't set IPV6_V6ONLY on socket for SCTP/UDP/IPv6 (errno = %d).\n", errno); #endif } if (setsockopt(SCTP_BASE_VAR(userspace_udpsctp6), 
SOL_SOCKET, SO_RCVTIMEO, (const void *)&timeout, sizeof(timeout)) < 0) { -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) SCTPDBG(SCTP_DEBUG_USR, "Can't set timeout on socket for SCTP/UDP/IPv6 (errno = %d).\n", WSAGetLastError()); closesocket(SCTP_BASE_VAR(userspace_udpsctp6)); #else @@ -1359,7 +1410,7 @@ recv_thread_init(void) addr_ipv6.sin6_port = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)); addr_ipv6.sin6_addr = in6addr_any; if (bind(SCTP_BASE_VAR(userspace_udpsctp6), (const struct sockaddr *)&addr_ipv6, sizeof(struct sockaddr_in6)) < 0) { -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) SCTPDBG(SCTP_DEBUG_USR, "Can't bind socket for SCTP/UDP/IPv6 (errno = %d).\n", WSAGetLastError()); closesocket(SCTP_BASE_VAR(userspace_udpsctp6)); #else @@ -1375,7 +1426,7 @@ recv_thread_init(void) } } #endif -#if defined(__Userspace_os_Darwin) || defined(__Userspace_os_DragonFly) || defined(__Userspace_os_FreeBSD) +#if defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__) #if defined(INET) || defined(INET6) if (SCTP_BASE_VAR(userspace_route) != -1) { int rc; @@ -1394,7 +1445,7 @@ recv_thread_init(void) if ((rc = sctp_userspace_thread_create(&SCTP_BASE_VAR(recvthreadraw), &recv_function_raw))) { SCTPDBG(SCTP_DEBUG_USR, "Can't start SCTP/IPv4 recv thread (%d).\n", rc); -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) closesocket(SCTP_BASE_VAR(userspace_rawsctp)); #else close(SCTP_BASE_VAR(userspace_rawsctp)); @@ -1407,7 +1458,7 @@ recv_thread_init(void) if ((rc = sctp_userspace_thread_create(&SCTP_BASE_VAR(recvthreadudp), &recv_function_udp))) { SCTPDBG(SCTP_DEBUG_USR, "Can't start SCTP/UDP/IPv4 recv thread (%d).\n", rc); -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) closesocket(SCTP_BASE_VAR(userspace_udpsctp)); #else close(SCTP_BASE_VAR(userspace_udpsctp)); @@ -1422,7 +1473,7 @@ recv_thread_init(void) if ((rc = sctp_userspace_thread_create(&SCTP_BASE_VAR(recvthreadraw6), &recv_function_raw6))) { SCTPDBG(SCTP_DEBUG_USR, "Can't start SCTP/IPv6 recv thread (%d).\n", rc); -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) closesocket(SCTP_BASE_VAR(userspace_rawsctp6)); #else close(SCTP_BASE_VAR(userspace_rawsctp6)); @@ -1435,7 +1486,7 @@ recv_thread_init(void) if ((rc = sctp_userspace_thread_create(&SCTP_BASE_VAR(recvthreadudp6), &recv_function_udp6))) { SCTPDBG(SCTP_DEBUG_USR, "Can't start SCTP/UDP/IPv6 recv thread (%d).\n", rc); -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) closesocket(SCTP_BASE_VAR(userspace_udpsctp6)); #else close(SCTP_BASE_VAR(userspace_udpsctp6)); @@ -1449,42 +1500,63 @@ recv_thread_init(void) void recv_thread_destroy(void) { -#if defined(__Userspace_os_Darwin) || defined(__Userspace_os_DragonFly) || defined(__Userspace_os_FreeBSD) +#if defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__) #if defined(INET) || defined(INET6) if (SCTP_BASE_VAR(userspace_route) != -1) { close(SCTP_BASE_VAR(userspace_route)); + pthread_join(SCTP_BASE_VAR(recvthreadroute), NULL); } #endif #endif #if defined(INET) if (SCTP_BASE_VAR(userspace_rawsctp) != -1) { -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) closesocket(SCTP_BASE_VAR(userspace_rawsctp)); + SCTP_BASE_VAR(userspace_rawsctp) = -1; + WaitForSingleObject(SCTP_BASE_VAR(recvthreadraw), INFINITE); + CloseHandle(SCTP_BASE_VAR(recvthreadraw)); #else close(SCTP_BASE_VAR(userspace_rawsctp)); + SCTP_BASE_VAR(userspace_rawsctp) = -1; + pthread_join(SCTP_BASE_VAR(recvthreadraw), NULL); #endif } if (SCTP_BASE_VAR(userspace_udpsctp) != -1) { -#if 
defined(__Userspace_os_Windows) +#if defined(_WIN32) closesocket(SCTP_BASE_VAR(userspace_udpsctp)); + SCTP_BASE_VAR(userspace_udpsctp) = -1; + WaitForSingleObject(SCTP_BASE_VAR(recvthreadudp), INFINITE); + CloseHandle(SCTP_BASE_VAR(recvthreadudp)); #else close(SCTP_BASE_VAR(userspace_udpsctp)); + SCTP_BASE_VAR(userspace_udpsctp) = -1; + pthread_join(SCTP_BASE_VAR(recvthreadudp), NULL); #endif } #endif #if defined(INET6) if (SCTP_BASE_VAR(userspace_rawsctp6) != -1) { -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) closesocket(SCTP_BASE_VAR(userspace_rawsctp6)); + SCTP_BASE_VAR(userspace_rawsctp6) = -1; + WaitForSingleObject(SCTP_BASE_VAR(recvthreadraw6), INFINITE); + CloseHandle(SCTP_BASE_VAR(recvthreadraw6)); #else close(SCTP_BASE_VAR(userspace_rawsctp6)); + SCTP_BASE_VAR(userspace_rawsctp6) = -1; + pthread_join(SCTP_BASE_VAR(recvthreadraw6), NULL); #endif } if (SCTP_BASE_VAR(userspace_udpsctp6) != -1) { -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) + SCTP_BASE_VAR(userspace_udpsctp6) = -1; closesocket(SCTP_BASE_VAR(userspace_udpsctp6)); + WaitForSingleObject(SCTP_BASE_VAR(recvthreadudp6), INFINITE); + CloseHandle(SCTP_BASE_VAR(recvthreadudp6)); #else close(SCTP_BASE_VAR(userspace_udpsctp6)); + SCTP_BASE_VAR(userspace_udpsctp6) = -1; + pthread_join(SCTP_BASE_VAR(recvthreadudp6), NULL); #endif } #endif diff --git a/TMessagesProj/jni/third_party/usrsctplib/user_route.h b/TMessagesProj/jni/third_party/usrsctplib/user_route.h index 82b07d769..4abf2eac9 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/user_route.h +++ b/TMessagesProj/jni/third_party/usrsctplib/user_route.h @@ -10,7 +10,7 @@ * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. - * 4. Neither the name of the University nor the names of its contributors + * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* diff --git a/TMessagesProj/jni/third_party/usrsctplib/user_socket.c b/TMessagesProj/jni/third_party/usrsctplib/user_socket.c index 146a6d9f9..9385abd87 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/user_socket.c +++ b/TMessagesProj/jni/third_party/usrsctplib/user_socket.c @@ -44,13 +44,13 @@ #ifdef INET6 #include #endif -#if defined(__Userspace_os_FreeBSD) +#if defined(__FreeBSD__) #include #endif -#if defined(__Userspace_os_Linux) +#if defined(__linux__) #define __FAVOR_BSD /* (on Ubuntu at least) enables UDP header field names like BSD in RFC 768 */ #endif -#if !defined (__Userspace_os_Windows) +#if !defined(_WIN32) #if defined INET || defined INET6 #include #endif @@ -79,7 +79,7 @@ extern int sctp_attach(struct socket *so, int proto, uint32_t vrf_id); extern int sctpconn_attach(struct socket *so, int proto, uint32_t vrf_id); static void init_sync(void) { -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) #if defined(INET) || defined(INET6) WSADATA wsaData; @@ -115,8 +115,8 @@ usrsctp_init(uint16_t port, void usrsctp_init_nothreads(uint16_t port, - int (*conn_output)(void *addr, void *buffer, size_t length, uint8_t tos, uint8_t set_df), - void (*debug_printf)(const char *format, ...)) + int (*conn_output)(void *addr, void *buffer, size_t length, uint8_t tos, uint8_t set_df), + void (*debug_printf)(const char *format, ...)) { init_sync(); sctp_init(port, conn_output, debug_printf, 0); @@ -169,28 +169,17 @@ socantsendmore(struct socket *so) int sbwait(struct sockbuf *sb) { -#if defined(__Userspace__) /* __Userspace__ */ - SOCKBUF_LOCK_ASSERT(sb); sb->sb_flags |= SB_WAIT; -#if defined (__Userspace_os_Windows) +#if defined(_WIN32) if (SleepConditionVariableCS(&(sb->sb_cond), &(sb->sb_mtx), INFINITE)) - return 0; + return (0); else - return -1; + return (-1); #else return (pthread_cond_wait(&(sb->sb_cond), &(sb->sb_mtx))); #endif - -#else - SOCKBUF_LOCK_ASSERT(sb); - - sb->sb_flags |= SB_WAIT; - return (msleep(&sb->sb_cc, &sb->sb_mtx, - (sb->sb_flags & SB_NOINTR) ? PSOCK : PSOCK | PCATCH, "sbwait", - sb->sb_timeo)); -#endif } @@ -394,7 +383,7 @@ void wakeup(void *ident, struct socket *so) { SOCK_LOCK(so); -#if defined (__Userspace_os_Windows) +#if defined(_WIN32) WakeAllConditionVariable(&(so)->timeo_cond); #else pthread_cond_broadcast(&(so)->timeo_cond); @@ -420,7 +409,7 @@ wakeup_one(void *ident) subsidiary sockets. 
*/ ACCEPT_LOCK(); -#if defined (__Userspace_os_Windows) +#if defined(_WIN32) WakeAllConditionVariable(&accept_cond); #else pthread_cond_broadcast(&accept_cond); @@ -568,28 +557,6 @@ sonewconn(struct socket *head, int connstatus) } -/* From /src/sys/sys/sysproto.h */ -struct sctp_generic_sendmsg_args { - int sd; - caddr_t msg; - int mlen; - caddr_t to; - socklen_t tolen; /* was __socklen_t */ - struct sctp_sndrcvinfo * sinfo; - int flags; -}; - -struct sctp_generic_recvmsg_args { - int sd; - struct iovec *iov; - int iovlen; - struct sockaddr *from; - socklen_t *fromlenaddr; /* was __socklen_t */ - struct sctp_sndrcvinfo *sinfo; - int *msg_flags; -}; - - /* Source: /src/sys/gnu/fs/xfs/FreeBSD/xfs_ioctl.c */ @@ -774,11 +741,11 @@ userspace_sctp_sendmsg(struct socket *so, size_t len, struct sockaddr *to, socklen_t tolen, - u_int32_t ppid, - u_int32_t flags, - u_int16_t stream_no, - u_int32_t timetolive, - u_int32_t context) + uint32_t ppid, + uint32_t flags, + uint16_t stream_no, + uint32_t timetolive, + uint32_t context) { struct sctp_sndrcvinfo sndrcvinfo, *sinfo = &sndrcvinfo; struct uio auio; @@ -958,11 +925,11 @@ userspace_sctp_sendmbuf(struct socket *so, size_t len, struct sockaddr *to, socklen_t tolen, - u_int32_t ppid, - u_int32_t flags, - u_int16_t stream_no, - u_int32_t timetolive, - u_int32_t context) + uint32_t ppid, + uint32_t flags, + uint16_t stream_no, + uint32_t timetolive, + uint32_t context) { struct sctp_sndrcvinfo sndrcvinfo, *sinfo = &sndrcvinfo; @@ -1068,7 +1035,7 @@ userspace_sctp_recvmsg(struct socket *so, if (error) { if ((auio.uio_resid != ulen) && (error == EINTR || -#if !defined(__Userspace_os_NetBSD) +#if !defined(__NetBSD__) error == ERESTART || #endif error == EWOULDBLOCK)) { @@ -1161,7 +1128,7 @@ usrsctp_recvv(struct socket *so, if (errno) { if ((auio.uio_resid != ulen) && (errno == EINTR || -#if !defined(__Userspace_os_NetBSD) +#if !defined(__NetBSD__) errno == ERESTART || #endif errno == EWOULDBLOCK)) { @@ -1260,7 +1227,6 @@ out: -#if defined(__Userspace__) /* Taken from /src/sys/kern/uipc_socket.c * and modified for __Userspace__ * socreate returns a socket. The socket should be @@ -1331,74 +1297,6 @@ socreate(int dom, struct socket **aso, int type, int proto) *aso = so; return (0); } -#else -/* The kernel version for reference is below. The #else - should be removed once the __Userspace__ - version is tested. - * socreate returns a socket with a ref count of 1. The socket should be - * closed with soclose(). 
- */ -int -socreate(int dom, struct socket **aso, int type, int proto, - struct ucred *cred, struct thread *td) -{ - struct protosw *prp; - struct socket *so; - int error; - - if (proto) - prp = pffindproto(dom, proto, type); - else - prp = pffindtype(dom, type); - - if (prp == NULL || prp->pr_usrreqs->pru_attach == NULL || - prp->pr_usrreqs->pru_attach == pru_attach_notsupp) - return (EPROTONOSUPPORT); - - if (jailed(cred) && jail_socket_unixiproute_only && - prp->pr_domain->dom_family != PF_LOCAL && - prp->pr_domain->dom_family != PF_INET && - prp->pr_domain->dom_family != PF_ROUTE) { - return (EPROTONOSUPPORT); - } - - if (prp->pr_type != type) - return (EPROTOTYPE); - so = soalloc(); - if (so == NULL) - return (ENOBUFS); - - TAILQ_INIT(&so->so_incomp); - TAILQ_INIT(&so->so_comp); - so->so_type = type; - so->so_cred = crhold(cred); - so->so_proto = prp; -#ifdef MAC - mac_create_socket(cred, so); -#endif - knlist_init(&so->so_rcv.sb_sel.si_note, SOCKBUF_MTX(&so->so_rcv), - NULL, NULL, NULL); - knlist_init(&so->so_snd.sb_sel.si_note, SOCKBUF_MTX(&so->so_snd), - NULL, NULL, NULL); - so->so_count = 1; - /* - * Auto-sizing of socket buffers is managed by the protocols and - * the appropriate flags must be set in the pru_attach function. - */ - error = (*prp->pr_usrreqs->pru_attach)(so, proto, td); - if (error) { - KASSERT(so->so_count == 1, ("socreate: so_count %d", - so->so_count)); - so->so_count = 0; - sodealloc(so); - return (error); - } - *aso = so; - return (0); -} -#endif - - /* Taken from /src/sys/kern/uipc_syscalls.c @@ -1426,11 +1324,11 @@ struct socket * usrsctp_socket(int domain, int type, int protocol, int (*receive_cb)(struct socket *sock, union sctp_sockstore addr, void *data, size_t datalen, struct sctp_rcvinfo, int flags, void *ulp_info), - int (*send_cb)(struct socket *sock, uint32_t sb_free), + int (*send_cb)(struct socket *sock, uint32_t sb_free, void *ulp_info), uint32_t sb_threshold, void *ulp_info) { - struct socket *so; + struct socket *so = NULL; if ((protocol == IPPROTO_SCTP) && (SCTP_BASE_VAR(sctp_pcb_initialized) == 0)) { errno = EPROTONOSUPPORT; @@ -1493,7 +1391,6 @@ sbreserve(struct sockbuf *sb, u_long cc, struct socket *so) return (error); } -#if defined(__Userspace__) int soreserve(struct socket *so, u_long sndcc, u_long rcvcc) { @@ -1523,45 +1420,12 @@ soreserve(struct socket *so, u_long sndcc, u_long rcvcc) SOCKBUF_UNLOCK(&so->so_snd); return (ENOBUFS); } -#else /* kernel version for reference */ -int -soreserve(struct socket *so, u_long sndcc, u_long rcvcc) -{ - struct thread *td = curthread; - - SOCKBUF_LOCK(&so->so_snd); - SOCKBUF_LOCK(&so->so_rcv); - if (sbreserve_locked(&so->so_snd, sndcc, so, td) == 0) - goto bad; - if (sbreserve_locked(&so->so_rcv, rcvcc, so, td) == 0) - goto bad2; - if (so->so_rcv.sb_lowat == 0) - so->so_rcv.sb_lowat = 1; - if (so->so_snd.sb_lowat == 0) - so->so_snd.sb_lowat = MCLBYTES; - if (so->so_snd.sb_lowat > so->so_snd.sb_hiwat) - so->so_snd.sb_lowat = so->so_snd.sb_hiwat; - SOCKBUF_UNLOCK(&so->so_rcv); - SOCKBUF_UNLOCK(&so->so_snd); - return (0); -bad2: - sbrelease_locked(&so->so_snd, so); -bad: - SOCKBUF_UNLOCK(&so->so_rcv); - SOCKBUF_UNLOCK(&so->so_snd); - return (ENOBUFS); -} -#endif - - - /* Taken from /src/sys/kern/uipc_sockbuf.c * and modified for __Userspace__ */ -#if defined(__Userspace__) void sowakeup(struct socket *so, struct sockbuf *sb) { @@ -1571,7 +1435,7 @@ sowakeup(struct socket *so, struct sockbuf *sb) sb->sb_flags &= ~SB_SEL; if (sb->sb_flags & SB_WAIT) { sb->sb_flags &= ~SB_WAIT; -#if defined 
(__Userspace_os_Windows) +#if defined(_WIN32) WakeAllConditionVariable(&(sb)->sb_cond); #else pthread_cond_broadcast(&(sb)->sb_cond); @@ -1579,43 +1443,6 @@ sowakeup(struct socket *so, struct sockbuf *sb) } SOCKBUF_UNLOCK(sb); } -#else /* kernel version for reference */ -/* - * Wakeup processes waiting on a socket buffer. Do asynchronous notification - * via SIGIO if the socket has the SS_ASYNC flag set. - * - * Called with the socket buffer lock held; will release the lock by the end - * of the function. This allows the caller to acquire the socket buffer lock - * while testing for the need for various sorts of wakeup and hold it through - * to the point where it's no longer required. We currently hold the lock - * through calls out to other subsystems (with the exception of kqueue), and - * then release it to avoid lock order issues. It's not clear that's - * correct. - */ -void -sowakeup(struct socket *so, struct sockbuf *sb) -{ - - SOCKBUF_LOCK_ASSERT(sb); - - selwakeuppri(&sb->sb_sel, PSOCK); - sb->sb_flags &= ~SB_SEL; - if (sb->sb_flags & SB_WAIT) { - sb->sb_flags &= ~SB_WAIT; - wakeup(&sb->sb_cc); - } - KNOTE_LOCKED(&sb->sb_sel.si_note, 0); - SOCKBUF_UNLOCK(sb); - if ((so->so_state & SS_ASYNC) && so->so_sigio != NULL) - pgsigio(&so->so_sigio, SIGIO, 0); - if (sb->sb_flags & SB_UPCALL) - (*so->so_upcall)(so, so->so_upcallarg, M_NOWAIT); - if (sb->sb_flags & SB_AIO) - aio_swake(so, sb); - mtx_assert(SOCKBUF_MTX(sb), MA_NOTOWNED); -} -#endif - /* Taken from /src/sys/kern/uipc_socket.c @@ -1787,7 +1614,7 @@ user_accept(struct socket *head, struct sockaddr **name, socklen_t *namelen, st head->so_error = ECONNABORTED; break; } -#if defined (__Userspace_os_Windows) +#if defined(_WIN32) if (SleepConditionVariableCS(&accept_cond, &accept_mtx, INFINITE)) error = 0; else @@ -2051,7 +1878,7 @@ soconnect(struct socket *so, struct sockaddr *nam) * Otherwise, if connected, try to disconnect first. This allows * user to disconnect by connecting to, e.g., a null address. 
*/ - if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) && (error = sodisconnect(so))) { + if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) && (sodisconnect(so) != 0)) { error = EISCONN; } else { /* @@ -2108,7 +1935,7 @@ int user_connect(struct socket *so, struct sockaddr *sa) SOCK_LOCK(so); while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) { -#if defined (__Userspace_os_Windows) +#if defined(_WIN32) if (SleepConditionVariableCS(SOCK_COND(so), SOCK_MTX(so), INFINITE)) error = 0; else @@ -2117,7 +1944,7 @@ int user_connect(struct socket *so, struct sockaddr *sa) error = pthread_cond_wait(SOCK_COND(so), SOCK_MTX(so)); #endif if (error) { -#if defined(__Userspace_os_NetBSD) +#if defined(__NetBSD__) if (error == EINTR) { #else if (error == EINTR || error == ERESTART) { @@ -2137,7 +1964,7 @@ bad: if (!interrupted) { so->so_state &= ~SS_ISCONNECTING; } -#if !defined(__Userspace_os_NetBSD) +#if !defined(__NetBSD__) if (error == ERESTART) { error = EINTR; } @@ -2247,7 +2074,7 @@ usrsctp_finish(void) return (-1); } sctp_finish(); -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) DeleteConditionVariable(&accept_cond); DeleteCriticalSection(&accept_mtx); #if defined(INET) || defined(INET6) @@ -2383,7 +2210,7 @@ usrsctp_getsockopt(struct socket *so, int level, int option_name, int *buf_size; buf_size = (int *)option_value; - *buf_size = so->so_rcv.sb_hiwat;; + *buf_size = so->so_rcv.sb_hiwat; *option_len = (socklen_t)sizeof(int); return (0); } @@ -2583,10 +2410,16 @@ usrsctp_set_ulpinfo(struct socket *so, void *ulp_info) return (register_ulp_info(so, ulp_info)); } + +int +usrsctp_get_ulpinfo(struct socket *so, void **pulp_info) +{ + return (retrieve_ulp_info(so, pulp_info)); +} + int usrsctp_bindx(struct socket *so, struct sockaddr *addrs, int addrcnt, int flags) { - struct sctp_getaddresses *gaddrs; struct sockaddr *sa; #ifdef INET struct sockaddr_in *sin; @@ -2595,9 +2428,9 @@ usrsctp_bindx(struct socket *so, struct sockaddr *addrs, int addrcnt, int flags) struct sockaddr_in6 *sin6; #endif int i; - size_t argsz; #if defined(INET) || defined(INET6) - uint16_t sport = 0; + uint16_t sport; + bool fix_port; #endif /* validate the flags */ @@ -2611,6 +2444,10 @@ usrsctp_bindx(struct socket *so, struct sockaddr *addrs, int addrcnt, int flags) errno = EINVAL; return (-1); } +#if defined(INET) || defined(INET6) + sport = 0; + fix_port = false; +#endif /* First pre-screen the addresses */ sa = addrs; for (i = 0; i < addrcnt; i++) { @@ -2635,6 +2472,7 @@ usrsctp_bindx(struct socket *so, struct sockaddr *addrs, int addrcnt, int flags) } else { /* save off the port */ sport = sin->sin_port; + fix_port = (i > 0); } } #ifndef HAVE_SA_LEN @@ -2662,6 +2500,7 @@ usrsctp_bindx(struct socket *so, struct sockaddr *addrs, int addrcnt, int flags) } else { /* save off the port */ sport = sin6->sin6_port; + fix_port = (i > 0); } } #ifndef HAVE_SA_LEN @@ -2678,41 +2517,30 @@ usrsctp_bindx(struct socket *so, struct sockaddr *addrs, int addrcnt, int flags) sa = (struct sockaddr *)((caddr_t)sa + sa->sa_len); #endif } - argsz = sizeof(struct sctp_getaddresses) + - sizeof(struct sockaddr_storage); - if ((gaddrs = (struct sctp_getaddresses *)malloc(argsz)) == NULL) { - errno = ENOMEM; - return (-1); - } sa = addrs; for (i = 0; i < addrcnt; i++) { #ifndef HAVE_SA_LEN size_t sa_len; + #endif - memset(gaddrs, 0, argsz); - gaddrs->sget_assoc_id = 0; #ifdef HAVE_SA_LEN - memcpy(gaddrs->addr, sa, sa->sa_len); #if defined(INET) || defined(INET6) - if ((i == 0) && (sport != 0)) { - switch (gaddrs->addr->sa_family) 
{ + if (fix_port) { + switch (sa->sa_family) { #ifdef INET case AF_INET: - sin = (struct sockaddr_in *)gaddrs->addr; - sin->sin_port = sport; + ((struct sockaddr_in *)sa)->sin_port = sport; break; #endif #ifdef INET6 case AF_INET6: - sin6 = (struct sockaddr_in6 *)gaddrs->addr; - sin6->sin6_port = sport; + ((struct sockaddr_in6 *)sa)->sin6_port = sport; break; #endif } } #endif - if (usrsctp_setsockopt(so, IPPROTO_SCTP, flags, gaddrs, (socklen_t)argsz) != 0) { - free(gaddrs); + if (usrsctp_setsockopt(so, IPPROTO_SCTP, flags, sa, sa->sa_len) != 0) { return (-1); } sa = (struct sockaddr *)((caddr_t)sa + sa->sa_len); @@ -2732,38 +2560,33 @@ usrsctp_bindx(struct socket *so, struct sockaddr *addrs, int addrcnt, int flags) sa_len = 0; break; } - memcpy(gaddrs->addr, sa, sa_len); /* * Now, if there was a port mentioned, assure that the * first address has that port to make sure it fails or * succeeds correctly. */ #if defined(INET) || defined(INET6) - if ((i == 0) && (sport != 0)) { - switch (gaddrs->addr->sa_family) { + if (fix_port) { + switch (sa->sa_family) { #ifdef INET case AF_INET: - sin = (struct sockaddr_in *)gaddrs->addr; - sin->sin_port = sport; + ((struct sockaddr_in *)sa)->sin_port = sport; break; #endif #ifdef INET6 case AF_INET6: - sin6 = (struct sockaddr_in6 *)gaddrs->addr; - sin6->sin6_port = sport; + ((struct sockaddr_in6 *)sa)->sin6_port = sport; break; #endif } } #endif - if (usrsctp_setsockopt(so, IPPROTO_SCTP, flags, gaddrs, (socklen_t)argsz) != 0) { - free(gaddrs); + if (usrsctp_setsockopt(so, IPPROTO_SCTP, flags, sa, (socklen_t)sa_len) != 0) { return (-1); } sa = (struct sockaddr *)((caddr_t)sa + sa_len); #endif } - free(gaddrs); return (0); } @@ -2899,9 +2722,9 @@ usrsctp_getpaddrs(struct socket *so, sctp_assoc_t id, struct sockaddr **raddrs) free(addrs); return (-1); } - *raddrs = (struct sockaddr *)&addrs->addr[0]; + *raddrs = &addrs->addr[0].sa; cnt = 0; - sa = (struct sockaddr *)&addrs->addr[0]; + sa = &addrs->addr[0].sa; lim = (caddr_t)addrs + opt_len; #ifdef HAVE_SA_LEN while (((caddr_t)sa < lim) && (sa->sa_len > 0)) { @@ -2938,7 +2761,7 @@ usrsctp_freepaddrs(struct sockaddr *addrs) /* Take away the hidden association id */ void *fr_addr; - fr_addr = (void *)((caddr_t)addrs - sizeof(sctp_assoc_t)); + fr_addr = (void *)((caddr_t)addrs - offsetof(struct sctp_getaddresses, addr)); /* Now free it */ free(fr_addr); } @@ -2967,9 +2790,7 @@ usrsctp_getladdrs(struct socket *so, sctp_assoc_t id, struct sockaddr **raddrs) errno = ENOTCONN; return (-1); } - opt_len = (socklen_t)(size_of_addresses + - sizeof(struct sockaddr_storage) + - sizeof(struct sctp_getaddresses)); + opt_len = (socklen_t)(size_of_addresses + sizeof(struct sctp_getaddresses)); addrs = calloc(1, (size_t)opt_len); if (addrs == NULL) { errno = ENOMEM; @@ -2982,9 +2803,9 @@ usrsctp_getladdrs(struct socket *so, sctp_assoc_t id, struct sockaddr **raddrs) errno = ENOMEM; return (-1); } - *raddrs = (struct sockaddr *)&addrs->addr[0]; + *raddrs = &addrs->addr[0].sa; cnt = 0; - sa = (struct sockaddr *)&addrs->addr[0]; + sa = &addrs->addr[0].sa; lim = (caddr_t)addrs + opt_len; #ifdef HAVE_SA_LEN while (((caddr_t)sa < lim) && (sa->sa_len > 0)) { @@ -3021,7 +2842,7 @@ usrsctp_freeladdrs(struct sockaddr *addrs) /* Take away the hidden association id */ void *fr_addr; - fr_addr = (void *)((caddr_t)addrs - sizeof(sctp_assoc_t)); + fr_addr = (void *)((caddr_t)addrs - offsetof(struct sctp_getaddresses, addr)); /* Now free it */ free(fr_addr); } @@ -3035,16 +2856,11 @@ sctp_userspace_ip_output(int *result, struct mbuf *o_pak, 
struct mbuf *m; struct mbuf *m_orig; int iovcnt; - int send_len; int len; - int send_count; struct ip *ip; struct udphdr *udp; -#if !defined (__Userspace_os_Windows) - int res; -#endif struct sockaddr_in dst; -#if defined (__Userspace_os_Windows) +#if defined(_WIN32) WSAMSG win_msg_hdr; DWORD win_sent_len; WSABUF send_iovec[MAXLEN_MBUF_CHAIN]; @@ -3090,7 +2906,7 @@ sctp_userspace_ip_output(int *result, struct mbuf *o_pak, SCTP_PRINTF("Why did the SCTP implementation did not choose a source address?\n"); } /* TODO need to worry about ro->ro_dst as in ip_output? */ -#if defined(__Userspace_os_Linux) || defined (__Userspace_os_Windows) || (defined(__Userspace_os_FreeBSD) && (__FreeBSD_version >= 1100030)) +#if defined(__linux__) || defined(_WIN32) || (defined(__FreeBSD__) && (__FreeBSD_version >= 1100030)) /* need to put certain fields into network order for Linux */ ip->ip_len = htons(ip->ip_len); #endif @@ -3113,17 +2929,13 @@ sctp_userspace_ip_output(int *result, struct mbuf *o_pak, m_adj(m, sizeof(struct ip) + sizeof(struct udphdr)); } - send_len = SCTP_HEADER_LEN(m); /* length of entire packet */ - send_count = 0; for (iovcnt = 0; m != NULL && iovcnt < MAXLEN_MBUF_CHAIN; m = m->m_next, iovcnt++) { -#if !defined (__Userspace_os_Windows) +#if !defined(_WIN32) send_iovec[iovcnt].iov_base = (caddr_t)m->m_data; send_iovec[iovcnt].iov_len = SCTP_BUF_LEN(m); - send_count += send_iovec[iovcnt].iov_len; #else send_iovec[iovcnt].buf = (caddr_t)m->m_data; send_iovec[iovcnt].len = SCTP_BUF_LEN(m); - send_count += send_iovec[iovcnt].len; #endif } @@ -3132,7 +2944,7 @@ sctp_userspace_ip_output(int *result, struct mbuf *o_pak, goto free_mbuf; } -#if !defined (__Userspace_os_Windows) +#if !defined(_WIN32) msg_hdr.msg_name = (struct sockaddr *) &dst; msg_hdr.msg_namelen = sizeof(struct sockaddr_in); msg_hdr.msg_iov = send_iovec; @@ -3142,12 +2954,12 @@ sctp_userspace_ip_output(int *result, struct mbuf *o_pak, msg_hdr.msg_flags = 0; if ((!use_udp_tunneling) && (SCTP_BASE_VAR(userspace_rawsctp) != -1)) { - if ((res = sendmsg(SCTP_BASE_VAR(userspace_rawsctp), &msg_hdr, MSG_DONTWAIT)) != send_len) { + if (sendmsg(SCTP_BASE_VAR(userspace_rawsctp), &msg_hdr, MSG_DONTWAIT) < 0) { *result = errno; } } if ((use_udp_tunneling) && (SCTP_BASE_VAR(userspace_udpsctp) != -1)) { - if ((res = sendmsg(SCTP_BASE_VAR(userspace_udpsctp), &msg_hdr, MSG_DONTWAIT)) != send_len) { + if (sendmsg(SCTP_BASE_VAR(userspace_udpsctp), &msg_hdr, MSG_DONTWAIT) < 0) { *result = errno; } } @@ -3164,15 +2976,11 @@ sctp_userspace_ip_output(int *result, struct mbuf *o_pak, if ((!use_udp_tunneling) && (SCTP_BASE_VAR(userspace_rawsctp) != -1)) { if (WSASendTo(SCTP_BASE_VAR(userspace_rawsctp), (LPWSABUF) send_iovec, iovcnt, &win_sent_len, win_msg_hdr.dwFlags, win_msg_hdr.name, (int) win_msg_hdr.namelen, NULL, NULL) != 0) { *result = WSAGetLastError(); - } else if ((int)win_sent_len != send_len) { - *result = WSAGetLastError(); } } if ((use_udp_tunneling) && (SCTP_BASE_VAR(userspace_udpsctp) != -1)) { if (WSASendTo(SCTP_BASE_VAR(userspace_udpsctp), (LPWSABUF) send_iovec, iovcnt, &win_sent_len, win_msg_hdr.dwFlags, win_msg_hdr.name, (int) win_msg_hdr.namelen, NULL, NULL) != 0) { *result = WSAGetLastError(); - } else if ((int)win_sent_len != send_len) { - *result = WSAGetLastError(); } } #endif @@ -3181,7 +2989,7 @@ free_mbuf: } #endif -#if defined (INET6) +#if defined(INET6) void sctp_userspace_ip6_output(int *result, struct mbuf *o_pak, struct route_in6 *ro, void *stcb, uint32_t vrf_id) @@ -3189,16 +2997,11 @@ void sctp_userspace_ip6_output(int 
*result, struct mbuf *o_pak, struct mbuf *m; struct mbuf *m_orig; int iovcnt; - int send_len; int len; - int send_count; struct ip6_hdr *ip6; struct udphdr *udp; -#if !defined (__Userspace_os_Windows) - int res; -#endif struct sockaddr_in6 dst; -#if defined (__Userspace_os_Windows) +#if defined(_WIN32) WSAMSG win_msg_hdr; DWORD win_sent_len; WSABUF send_iovec[MAXLEN_MBUF_CHAIN]; @@ -3265,20 +3068,16 @@ void sctp_userspace_ip6_output(int *result, struct mbuf *o_pak, if (use_udp_tunneling) { m_adj(m, sizeof(struct ip6_hdr) + sizeof(struct udphdr)); } else { - m_adj(m, sizeof(struct ip6_hdr)); + m_adj(m, sizeof(struct ip6_hdr)); } - send_len = SCTP_HEADER_LEN(m); /* length of entire packet */ - send_count = 0; for (iovcnt = 0; m != NULL && iovcnt < MAXLEN_MBUF_CHAIN; m = m->m_next, iovcnt++) { -#if !defined (__Userspace_os_Windows) +#if !defined(_WIN32) send_iovec[iovcnt].iov_base = (caddr_t)m->m_data; send_iovec[iovcnt].iov_len = SCTP_BUF_LEN(m); - send_count += send_iovec[iovcnt].iov_len; #else send_iovec[iovcnt].buf = (caddr_t)m->m_data; send_iovec[iovcnt].len = SCTP_BUF_LEN(m); - send_count += send_iovec[iovcnt].len; #endif } if (m != NULL) { @@ -3286,7 +3085,7 @@ void sctp_userspace_ip6_output(int *result, struct mbuf *o_pak, goto free_mbuf; } -#if !defined (__Userspace_os_Windows) +#if !defined(_WIN32) msg_hdr.msg_name = (struct sockaddr *) &dst; msg_hdr.msg_namelen = sizeof(struct sockaddr_in6); msg_hdr.msg_iov = send_iovec; @@ -3296,12 +3095,12 @@ void sctp_userspace_ip6_output(int *result, struct mbuf *o_pak, msg_hdr.msg_flags = 0; if ((!use_udp_tunneling) && (SCTP_BASE_VAR(userspace_rawsctp6) != -1)) { - if ((res = sendmsg(SCTP_BASE_VAR(userspace_rawsctp6), &msg_hdr, MSG_DONTWAIT)) != send_len) { + if (sendmsg(SCTP_BASE_VAR(userspace_rawsctp6), &msg_hdr, MSG_DONTWAIT)< 0) { *result = errno; } } if ((use_udp_tunneling) && (SCTP_BASE_VAR(userspace_udpsctp6) != -1)) { - if ((res = sendmsg(SCTP_BASE_VAR(userspace_udpsctp6), &msg_hdr, MSG_DONTWAIT)) != send_len) { + if (sendmsg(SCTP_BASE_VAR(userspace_udpsctp6), &msg_hdr, MSG_DONTWAIT) < 0) { *result = errno; } } @@ -3318,15 +3117,11 @@ void sctp_userspace_ip6_output(int *result, struct mbuf *o_pak, if ((!use_udp_tunneling) && (SCTP_BASE_VAR(userspace_rawsctp6) != -1)) { if (WSASendTo(SCTP_BASE_VAR(userspace_rawsctp6), (LPWSABUF) send_iovec, iovcnt, &win_sent_len, win_msg_hdr.dwFlags, win_msg_hdr.name, (int) win_msg_hdr.namelen, NULL, NULL) != 0) { *result = WSAGetLastError(); - } else if ((int)win_sent_len != send_len) { - *result = WSAGetLastError(); } } if ((use_udp_tunneling) && (SCTP_BASE_VAR(userspace_udpsctp6) != -1)) { if (WSASendTo(SCTP_BASE_VAR(userspace_udpsctp6), (LPWSABUF) send_iovec, iovcnt, &win_sent_len, win_msg_hdr.dwFlags, win_msg_hdr.name, (int) win_msg_hdr.namelen, NULL, NULL) != 0) { *result = WSAGetLastError(); - } else if ((int)win_sent_len != send_len) { - *result = WSAGetLastError(); } } #endif @@ -3405,21 +3200,30 @@ usrsctp_dumppacket(const void *buf, size_t len, int outbound) ftime(&tb); localtime_s(&t, &tb.time); #if defined(__MINGW32__) - snprintf(dump_buf, PREAMBLE_LENGTH + 1, PREAMBLE_FORMAT, - outbound ? 'O' : 'I', - t.tm_hour, t.tm_min, t.tm_sec, (long)(1000 * tb.millitm)); + if (snprintf(dump_buf, PREAMBLE_LENGTH + 1, PREAMBLE_FORMAT, + outbound ? 'O' : 'I', + t.tm_hour, t.tm_min, t.tm_sec, (long)(1000 * tb.millitm)) < 0) { + free(dump_buf); + return (NULL); + } #else - _snprintf_s(dump_buf, PREAMBLE_LENGTH + 1, PREAMBLE_LENGTH, PREAMBLE_FORMAT, - outbound ? 
'O' : 'I', - t.tm_hour, t.tm_min, t.tm_sec, (long)(1000 * tb.millitm)); + if (_snprintf_s(dump_buf, PREAMBLE_LENGTH + 1, PREAMBLE_LENGTH, PREAMBLE_FORMAT, + outbound ? 'O' : 'I', + t.tm_hour, t.tm_min, t.tm_sec, (long)(1000 * tb.millitm)) < 0) { + free(dump_buf); + return (NULL); + } #endif #else gettimeofday(&tv, NULL); sec = (time_t)tv.tv_sec; localtime_r((const time_t *)&sec, &t); - snprintf(dump_buf, PREAMBLE_LENGTH + 1, PREAMBLE_FORMAT, - outbound ? 'O' : 'I', - t.tm_hour, t.tm_min, t.tm_sec, (long)tv.tv_usec); + if (snprintf(dump_buf, PREAMBLE_LENGTH + 1, PREAMBLE_FORMAT, + outbound ? 'O' : 'I', + t.tm_hour, t.tm_min, t.tm_sec, (long)tv.tv_usec) < 0) { + free(dump_buf); + return (NULL); + } #endif pos += PREAMBLE_LENGTH; #if defined(_WIN32) && !defined(__MINGW32__) @@ -3482,9 +3286,10 @@ void usrsctp_conninput(void *addr, const void *buffer, size_t length, uint8_t ecn_bits) { struct sockaddr_conn src, dst; - struct mbuf *m; + struct mbuf *m, *mm; struct sctphdr *sh; struct sctp_chunkhdr *ch; + int remaining, offset; SCTP_STAT_INCR(sctps_recvpackets); SCTP_STAT_INCR_COUNTER64(sctps_inpackets); @@ -3503,18 +3308,30 @@ usrsctp_conninput(void *addr, const void *buffer, size_t length, uint8_t ecn_bit if ((m = sctp_get_mbuf_for_msg((unsigned int)length, 1, M_NOWAIT, 0, MT_DATA)) == NULL) { return; } + /* Set the lengths fields of the mbuf chain. + * This is expected by m_copyback(). + */ + remaining = (int)length; + for (mm = m; mm != NULL; mm = mm->m_next) { + mm->m_len = min((int)M_SIZE(mm), remaining); + m->m_pkthdr.len += mm->m_len; + remaining -= mm->m_len; + } + KASSERT(remaining == 0, ("usrsctp_conninput: %zu bytes left", remaining)); m_copyback(m, 0, (int)length, (caddr_t)buffer); - if (SCTP_BUF_LEN(m) < (int)(sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr))) { - if ((m = m_pullup(m, sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr))) == NULL) { + offset = sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); + if (SCTP_BUF_LEN(m) < offset) { + if ((m = m_pullup(m, offset)) == NULL) { SCTP_STAT_INCR(sctps_hdrops); return; } } - sh = mtod(m, struct sctphdr *);; + sh = mtod(m, struct sctphdr *); ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(struct sctphdr)); + offset -= sizeof(struct sctp_chunkhdr); src.sconn_port = sh->src_port; dst.sconn_port = sh->dest_port; - sctp_common_input_processing(&m, 0, sizeof(struct sctphdr), (int)length, + sctp_common_input_processing(&m, 0, offset, (int)length, (struct sockaddr *)&src, (struct sockaddr *)&dst, sh, ch, @@ -3527,9 +3344,9 @@ usrsctp_conninput(void *addr, const void *buffer, size_t length, uint8_t ecn_bit return; } -void usrsctp_handle_timers(uint32_t delta) +void usrsctp_handle_timers(uint32_t elapsed_milliseconds) { - sctp_handle_tick(delta); + sctp_handle_tick(sctp_msecs_to_ticks(elapsed_milliseconds)); } int diff --git a/TMessagesProj/jni/third_party/usrsctplib/user_socketvar.h b/TMessagesProj/jni/third_party/usrsctplib/user_socketvar.h index cd235fafe..e8eccc7fb 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/user_socketvar.h +++ b/TMessagesProj/jni/third_party/usrsctplib/user_socketvar.h @@ -10,7 +10,7 @@ * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. - * 4. Neither the name of the University nor the names of its contributors + * 3. 
Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * @@ -33,7 +33,7 @@ #ifndef _USER_SOCKETVAR_H_ #define _USER_SOCKETVAR_H_ -#if defined(__Userspace_os_Darwin) +#if defined(__APPLE__) #include #include #endif @@ -42,7 +42,7 @@ /* #include was 0 byte file */ /* #include was 0 byte file */ /* #include */ /*__Userspace__ alternative?*/ -#if !defined(__Userspace_os_DragonFly) && !defined(__Userspace_os_FreeBSD) && !defined(__Userspace_os_NetBSD) && !defined(__Userspace_os_Windows) && !defined(__Userspace_os_NaCl) +#if !defined(__DragonFly__) && !defined(__FreeBSD__) && !defined(__NetBSD__) && !defined(_WIN32) && !defined(__native_client__) #include #endif #define SOCK_MAXADDRLEN 255 @@ -54,16 +54,16 @@ #define SS_CANTRCVMORE 0x020 #define SS_CANTSENDMORE 0x010 -#if defined(__Userspace_os_Darwin) || defined(__Userspace_os_DragonFly) || defined(__Userspace_os_FreeBSD) || defined(__Userspace_os_OpenBSD) || defined (__Userspace_os_Windows) || defined(__Userspace_os_NaCl) +#if defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__) || defined(__OpenBSD__) || defined(_WIN32) || defined(__native_client__) #define UIO_MAXIOV 1024 #define ERESTART (-1) #endif -#if !defined(__Userspace_os_Darwin) && !defined(__Userspace_os_NetBSD) && !defined(__Userspace_os_OpenBSD) +#if !defined(__APPLE__) && !defined(__NetBSD__) && !defined(__OpenBSD__) enum uio_rw { UIO_READ, UIO_WRITE }; #endif -#if !defined(__Userspace_os_NetBSD) && !defined(__Userspace_os_OpenBSD) +#if !defined(__NetBSD__) && !defined(__OpenBSD__) /* Segment flag values. */ enum uio_seg { UIO_USERSPACE, /* from user data space */ @@ -100,7 +100,7 @@ struct uio { * handle on protocol and pointer to protocol * private data and error information. */ -#if defined (__Userspace_os_Windows) +#if defined(_WIN32) #define AF_ROUTE 17 #if !defined(__MINGW32__) typedef __int32 pid_t; @@ -237,7 +237,7 @@ struct socket { * avoid defining a lock order between listen and accept sockets * until such time as it proves to be a good idea. */ -#if defined(__Userspace_os_Windows) +#if defined(_WIN32) extern userland_mutex_t accept_mtx; extern userland_cond_t accept_cond; #define ACCEPT_LOCK_ASSERT() @@ -272,7 +272,7 @@ extern userland_cond_t accept_cond; * buffer. */ #define SOCKBUF_MTX(_sb) (&(_sb)->sb_mtx) -#if defined (__Userspace_os_Windows) +#if defined(_WIN32) #define SOCKBUF_LOCK_INIT(_sb, _name) \ InitializeCriticalSection(SOCKBUF_MTX(_sb)) #define SOCKBUF_LOCK_DESTROY(_sb) DeleteCriticalSection(SOCKBUF_MTX(_sb)) @@ -373,340 +373,6 @@ extern userland_cond_t accept_cond; #define SCTP_EVENT_WRITE 0x0002 /* socket is writeable */ #define SCTP_EVENT_ERROR 0x0004 /* socket has an error state */ -/* - * Externalized form of struct socket used by the sysctl(3) interface. 
- */ -struct xsocket { - size_t xso_len; /* length of this structure */ - struct socket *xso_so; /* makes a convenient handle sometimes */ - short so_type; - short so_options; - short so_linger; - short so_state; - caddr_t so_pcb; /* another convenient handle */ - int xso_protocol; - int xso_family; - u_short so_qlen; - u_short so_incqlen; - u_short so_qlimit; - short so_timeo; - u_short so_error; - pid_t so_pgid; - u_long so_oobmark; - struct xsockbuf { - u_int sb_cc; - u_int sb_hiwat; - u_int sb_mbcnt; - u_int sb_mbmax; - int sb_lowat; - int sb_timeo; - short sb_flags; - } so_rcv, so_snd; - uid_t so_uid; /* XXX */ -}; - -#if defined(_KERNEL) - - -/* - * Macros for sockets and socket buffering. - */ - -/* - * Do we need to notify the other side when I/O is possible? - */ -#define sb_notify(sb) (((sb)->sb_flags & (SB_WAIT | SB_SEL | SB_ASYNC | \ - SB_UPCALL | SB_AIO | SB_KNOTE)) != 0) - -/* - * How much space is there in a socket buffer (so->so_snd or so->so_rcv)? - * This is problematical if the fields are unsigned, as the space might - * still be negative (cc > hiwat or mbcnt > mbmax). Should detect - * overflow and return 0. Should use "lmin" but it doesn't exist now. - */ -#define sbspace(sb) \ - ((long) imin((int)((sb)->sb_hiwat - (sb)->sb_cc), \ - (int)((sb)->sb_mbmax - (sb)->sb_mbcnt))) - -/* do we have to send all at once on a socket? */ -#define sosendallatonce(so) \ - ((so)->so_proto->pr_flags & PR_ATOMIC) - -/* can we read something from so? */ -#define soreadable(so) \ - ((so)->so_rcv.sb_cc >= (so)->so_rcv.sb_lowat || \ - ((so)->so_rcv.sb_state & SBS_CANTRCVMORE) || \ - !TAILQ_EMPTY(&(so)->so_comp) || (so)->so_error) - -/* can we write something to so? */ -#define sowriteable(so) \ - ((sbspace(&(so)->so_snd) >= (so)->so_snd.sb_lowat && \ - (((so)->so_state&SS_ISCONNECTED) || \ - ((so)->so_proto->pr_flags&PR_CONNREQUIRED)==0)) || \ - ((so)->so_snd.sb_state & SBS_CANTSENDMORE) || \ - (so)->so_error) - -/* adjust counters in sb reflecting allocation of m */ -#define sballoc(sb, m) { \ - (sb)->sb_cc += (m)->m_len; \ - if ((m)->m_type != MT_DATA && (m)->m_type != MT_OOBDATA) \ - (sb)->sb_ctl += (m)->m_len; \ - (sb)->sb_mbcnt += MSIZE; \ - if ((m)->m_flags & M_EXT) \ - (sb)->sb_mbcnt += (m)->m_ext.ext_size; \ -} - -/* adjust counters in sb reflecting freeing of m */ -#define sbfree(sb, m) { \ - (sb)->sb_cc -= (m)->m_len; \ - if ((m)->m_type != MT_DATA && (m)->m_type != MT_OOBDATA) \ - (sb)->sb_ctl -= (m)->m_len; \ - (sb)->sb_mbcnt -= MSIZE; \ - if ((m)->m_flags & M_EXT) \ - (sb)->sb_mbcnt -= (m)->m_ext.ext_size; \ - if ((sb)->sb_sndptr == (m)) { \ - (sb)->sb_sndptr = NULL; \ - (sb)->sb_sndptroff = 0; \ - } \ - if ((sb)->sb_sndptroff != 0) \ - (sb)->sb_sndptroff -= (m)->m_len; \ -} - -/* - * soref()/sorele() ref-count the socket structure. Note that you must - * still explicitly close the socket, but the last ref count will free - * the structure. 
- */ -#define soref(so) do { \ - SOCK_LOCK_ASSERT(so); \ - ++(so)->so_count; \ -} while (0) - -#define sorele(so) do { \ - ACCEPT_LOCK_ASSERT(); \ - SOCK_LOCK_ASSERT(so); \ - KASSERT((so)->so_count > 0, ("sorele")); \ - if (--(so)->so_count == 0) \ - sofree(so); \ - else { \ - SOCK_UNLOCK(so); \ - ACCEPT_UNLOCK(); \ - } \ -} while (0) - -#define sotryfree(so) do { \ - ACCEPT_LOCK_ASSERT(); \ - SOCK_LOCK_ASSERT(so); \ - if ((so)->so_count == 0) \ - sofree(so); \ - else { \ - SOCK_UNLOCK(so); \ - ACCEPT_UNLOCK(); \ - } \ -} while(0) - -/* - * In sorwakeup() and sowwakeup(), acquire the socket buffer lock to - * avoid a non-atomic test-and-wakeup. However, sowakeup is - * responsible for releasing the lock if it is called. We unlock only - * if we don't call into sowakeup. If any code is introduced that - * directly invokes the underlying sowakeup() primitives, it must - * maintain the same semantics. - */ -#define sorwakeup_locked(so) do { \ - SOCKBUF_LOCK_ASSERT(&(so)->so_rcv); \ - if (sb_notify(&(so)->so_rcv)) \ - sowakeup((so), &(so)->so_rcv); \ - else \ - SOCKBUF_UNLOCK(&(so)->so_rcv); \ -} while (0) - -#define sorwakeup(so) do { \ - SOCKBUF_LOCK(&(so)->so_rcv); \ - sorwakeup_locked(so); \ -} while (0) - -#define sowwakeup_locked(so) do { \ - SOCKBUF_LOCK_ASSERT(&(so)->so_snd); \ - if (sb_notify(&(so)->so_snd)) \ - sowakeup((so), &(so)->so_snd); \ - else \ - SOCKBUF_UNLOCK(&(so)->so_snd); \ -} while (0) - -#define sowwakeup(so) do { \ - SOCKBUF_LOCK(&(so)->so_snd); \ - sowwakeup_locked(so); \ -} while (0) - -/* - * Argument structure for sosetopt et seq. This is in the KERNEL - * section because it will never be visible to user code. - */ -enum sopt_dir { SOPT_GET, SOPT_SET }; -struct sockopt { - enum sopt_dir sopt_dir; /* is this a get or a set? 
*/ - int sopt_level; /* second arg of [gs]etsockopt */ - int sopt_name; /* third arg of [gs]etsockopt */ - void *sopt_val; /* fourth arg of [gs]etsockopt */ - size_t sopt_valsize; /* (almost) fifth arg of [gs]etsockopt */ - struct thread *sopt_td; /* calling thread or null if kernel */ -}; - -struct accept_filter { - char accf_name[16]; - void (*accf_callback) - (struct socket *so, void *arg, int waitflag); - void * (*accf_create) - (struct socket *so, char *arg); - void (*accf_destroy) - (struct socket *so); - SLIST_ENTRY(accept_filter) accf_next; -}; - -extern int maxsockets; -extern u_long sb_max; -extern struct uma_zone *socket_zone; -extern so_gen_t so_gencnt; - -struct mbuf; -struct sockaddr; -struct ucred; -struct uio; - -/* - * From uipc_socket and friends - */ -int do_getopt_accept_filter(struct socket *so, struct sockopt *sopt); -int do_setopt_accept_filter(struct socket *so, struct sockopt *sopt); -int so_setsockopt(struct socket *so, int level, int optname, - void *optval, size_t optlen); -int sockargs(struct mbuf **mp, caddr_t buf, int buflen, int type); -int getsockaddr(struct sockaddr **namp, caddr_t uaddr, size_t len); -void sbappend(struct sockbuf *sb, struct mbuf *m); -void sbappend_locked(struct sockbuf *sb, struct mbuf *m); -void sbappendstream(struct sockbuf *sb, struct mbuf *m); -void sbappendstream_locked(struct sockbuf *sb, struct mbuf *m); -int sbappendaddr(struct sockbuf *sb, const struct sockaddr *asa, - struct mbuf *m0, struct mbuf *control); -int sbappendaddr_locked(struct sockbuf *sb, const struct sockaddr *asa, - struct mbuf *m0, struct mbuf *control); -int sbappendcontrol(struct sockbuf *sb, struct mbuf *m0, - struct mbuf *control); -int sbappendcontrol_locked(struct sockbuf *sb, struct mbuf *m0, - struct mbuf *control); -void sbappendrecord(struct sockbuf *sb, struct mbuf *m0); -void sbappendrecord_locked(struct sockbuf *sb, struct mbuf *m0); -void sbcheck(struct sockbuf *sb); -void sbcompress(struct sockbuf *sb, struct mbuf *m, struct mbuf *n); -struct mbuf * - sbcreatecontrol(caddr_t p, int size, int type, int level); -void sbdestroy(struct sockbuf *sb, struct socket *so); -void sbdrop(struct sockbuf *sb, int len); -void sbdrop_locked(struct sockbuf *sb, int len); -void sbdroprecord(struct sockbuf *sb); -void sbdroprecord_locked(struct sockbuf *sb); -void sbflush(struct sockbuf *sb); -void sbflush_locked(struct sockbuf *sb); -void sbrelease(struct sockbuf *sb, struct socket *so); -void sbrelease_locked(struct sockbuf *sb, struct socket *so); -int sbreserve(struct sockbuf *sb, u_long cc, struct socket *so, - struct thread *td); -int sbreserve_locked(struct sockbuf *sb, u_long cc, struct socket *so, - struct thread *td); -struct mbuf * - sbsndptr(struct sockbuf *sb, u_int off, u_int len, u_int *moff); -void sbtoxsockbuf(struct sockbuf *sb, struct xsockbuf *xsb); -int sbwait(struct sockbuf *sb); -int sblock(struct sockbuf *sb, int flags); -void sbunlock(struct sockbuf *sb); -void soabort(struct socket *so); -int soaccept(struct socket *so, struct sockaddr **nam); -int socheckuid(struct socket *so, uid_t uid); -int sobind(struct socket *so, struct sockaddr *nam, struct thread *td); -void socantrcvmore(struct socket *so); -void socantrcvmore_locked(struct socket *so); -void socantsendmore(struct socket *so); -void socantsendmore_locked(struct socket *so); -int soclose(struct socket *so); -int soconnect(struct socket *so, struct sockaddr *nam, struct thread *td); -int soconnect2(struct socket *so1, struct socket *so2); -int socow_setup(struct mbuf *m0, struct 
uio *uio); -int socreate(int dom, struct socket **aso, int type, int proto, - struct ucred *cred, struct thread *td); -int sodisconnect(struct socket *so); -struct sockaddr *sodupsockaddr(const struct sockaddr *sa, int mflags); -void sofree(struct socket *so); -int sogetopt(struct socket *so, struct sockopt *sopt); -void sohasoutofband(struct socket *so); -void soisconnected(struct socket *so); -void soisconnecting(struct socket *so); -void soisdisconnected(struct socket *so); -void soisdisconnecting(struct socket *so); -int solisten(struct socket *so, int backlog, struct thread *td); -void solisten_proto(struct socket *so, int backlog); -int solisten_proto_check(struct socket *so); -struct socket * - sonewconn(struct socket *head, int connstatus); -int sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen); -int sooptcopyout(struct sockopt *sopt, const void *buf, size_t len); - -/* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */ -int soopt_getm(struct sockopt *sopt, struct mbuf **mp); -int soopt_mcopyin(struct sockopt *sopt, struct mbuf *m); -int soopt_mcopyout(struct sockopt *sopt, struct mbuf *m); - -int sopoll(struct socket *so, int events, struct ucred *active_cred, - struct thread *td); -int sopoll_generic(struct socket *so, int events, - struct ucred *active_cred, struct thread *td); -int soreceive(struct socket *so, struct sockaddr **paddr, struct uio *uio, - struct mbuf **mp0, struct mbuf **controlp, int *flagsp); -int soreceive_generic(struct socket *so, struct sockaddr **paddr, - struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, - int *flagsp); -int soreserve(struct socket *so, u_long sndcc, u_long rcvcc); -void sorflush(struct socket *so); -int sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, - struct mbuf *top, struct mbuf *control, int flags, - struct thread *td); -int sosend_dgram(struct socket *so, struct sockaddr *addr, - struct uio *uio, struct mbuf *top, struct mbuf *control, - int flags, struct thread *td); -int sosend_generic(struct socket *so, struct sockaddr *addr, - struct uio *uio, struct mbuf *top, struct mbuf *control, - int flags, struct thread *td); -int sosetopt(struct socket *so, struct sockopt *sopt); -int soshutdown(struct socket *so, int how); -void sotoxsocket(struct socket *so, struct xsocket *xso); -void sowakeup(struct socket *so, struct sockbuf *sb); - -#ifdef SOCKBUF_DEBUG -void sblastrecordchk(struct sockbuf *, const char *, int); -#define SBLASTRECORDCHK(sb) sblastrecordchk((sb), __FILE__, __LINE__) - -void sblastmbufchk(struct sockbuf *, const char *, int); -#define SBLASTMBUFCHK(sb) sblastmbufchk((sb), __FILE__, __LINE__) -#else -#define SBLASTRECORDCHK(sb) /* nothing */ -#define SBLASTMBUFCHK(sb) /* nothing */ -#endif /* SOCKBUF_DEBUG */ - -/* - * Accept filter functions (duh). - */ -int accept_filt_add(struct accept_filter *filt); -int accept_filt_del(char *name); -struct accept_filter *accept_filt_get(char *name); -#ifdef ACCEPT_FILTER_MOD -#ifdef SYSCTL_DECL -SYSCTL_DECL(_net_inet_accf); -#endif -int accept_filt_generic_mod_event(module_t mod, int event, void *data); -#endif - -#endif /* _KERNEL */ - /*-------------------------------------------------------------*/ /*-------------------------------------------------------------*/ @@ -717,7 +383,6 @@ int accept_filt_generic_mod_event(module_t mod, int event, void *data); * above into, avoiding having to port the entire thing at once... * For function prototypes, the full bodies are in user_socket.c . 
*/ -#if defined(__Userspace__) /* ---------------------------------------------------------- */ /* --- function prototypes (implemented in user_socket.c) --- */ @@ -792,9 +457,7 @@ extern int sctp_listen(struct socket *so, int backlog, struct proc *p); extern void socantrcvmore_locked(struct socket *so); extern int sctp_bind(struct socket *so, struct sockaddr *addr); extern int sctp6_bind(struct socket *so, struct sockaddr *addr, void *proc); -#if defined(__Userspace__) extern int sctpconn_bind(struct socket *so, struct sockaddr *addr); -#endif extern int sctp_accept(struct socket *so, struct sockaddr **addr); extern int sctp_attach(struct socket *so, int proto, uint32_t vrf_id); extern int sctp6_attach(struct socket *so, int proto, uint32_t vrf_id); @@ -813,9 +476,7 @@ extern int soconnect(struct socket *so, struct sockaddr *nam); extern int sctp_disconnect(struct socket *so); extern int sctp_connect(struct socket *so, struct sockaddr *addr); extern int sctp6_connect(struct socket *so, struct sockaddr *addr); -#if defined(__Userspace__) extern int sctpconn_connect(struct socket *so, struct sockaddr *addr); -#endif extern void sctp_finish(void); /* ------------------------------------------------ */ @@ -863,8 +524,4 @@ extern void sctp_finish(void); sowwakeup_locked(so); \ } while (0) - - -#endif /* __Userspace__ */ - #endif /* !_SYS_SOCKETVAR_H_ */ diff --git a/TMessagesProj/jni/third_party/usrsctplib/user_uma.h b/TMessagesProj/jni/third_party/usrsctplib/user_uma.h index e20d2ada6..59d71fff4 100755 --- a/TMessagesProj/jni/third_party/usrsctplib/user_uma.h +++ b/TMessagesProj/jni/third_party/usrsctplib/user_uma.h @@ -83,14 +83,14 @@ struct uma_zone { /* Prototype */ uma_zone_t uma_zcreate(char *name, size_t size, uma_ctor ctor, uma_dtor dtor, - uma_init uminit, uma_fini fini, int align, u_int32_t flags); + uma_init uminit, uma_fini fini, int align, uint32_t flags); #define uma_zone_set_max(zone, number) /* stub TODO __Userspace__ */ uma_zone_t uma_zcreate(char *name, size_t size, uma_ctor ctor, uma_dtor dtor, - uma_init uminit, uma_fini fini, int align, u_int32_t flags) { + uma_init uminit, uma_fini fini, int align, uint32_t flags) { return NULL; /* stub TODO __Userspace__. Also place implementation in a separate .c file */ } #endif diff --git a/TMessagesProj/jni/third_party/usrsctplib/usrsctp.h b/TMessagesProj/jni/third_party/usrsctplib/usrsctp.h index 37d4828f0..e0c17c3a0 100644 --- a/TMessagesProj/jni/third_party/usrsctplib/usrsctp.h +++ b/TMessagesProj/jni/third_party/usrsctplib/usrsctp.h @@ -64,19 +64,19 @@ extern "C" { #elif defined(SCTP_STDINT_INCLUDE) #include SCTP_STDINT_INCLUDE #else -#define uint8_t unsigned __int8 -#define uint16_t unsigned __int16 -#define uint32_t unsigned __int32 -#define uint64_t unsigned __int64 -#define int16_t __int16 -#define int32_t __int32 +typedef unsigned __int8 uint8_t; +typedef unsigned __int16 uint16_t; +typedef unsigned __int32 uint32_t; +typedef unsigned __int64 uint64_t; +typedef __int16 int16_t; +typedef __int32 int32_t; #endif #ifndef ssize_t #ifdef _WIN64 -#define ssize_t __int64 +typedef __int64 ssize_t; #elif defined _WIN32 -#define ssize_t int +typedef int ssize_t; #else #error "Unknown platform!" 
#endif @@ -281,12 +281,13 @@ struct sctp_assoc_change { #define SCTP_CANT_STR_ASSOC 0x0005 /* sac_info values */ -#define SCTP_ASSOC_SUPPORTS_PR 0x01 -#define SCTP_ASSOC_SUPPORTS_AUTH 0x02 -#define SCTP_ASSOC_SUPPORTS_ASCONF 0x03 -#define SCTP_ASSOC_SUPPORTS_MULTIBUF 0x04 -#define SCTP_ASSOC_SUPPORTS_RE_CONFIG 0x05 -#define SCTP_ASSOC_SUPPORTS_MAX 0x05 +#define SCTP_ASSOC_SUPPORTS_PR 0x01 +#define SCTP_ASSOC_SUPPORTS_AUTH 0x02 +#define SCTP_ASSOC_SUPPORTS_ASCONF 0x03 +#define SCTP_ASSOC_SUPPORTS_MULTIBUF 0x04 +#define SCTP_ASSOC_SUPPORTS_RE_CONFIG 0x05 +#define SCTP_ASSOC_SUPPORTS_INTERLEAVING 0x06 +#define SCTP_ASSOC_SUPPORTS_MAX 0x06 /* Address event */ struct sctp_paddr_change { @@ -903,7 +904,7 @@ struct socket * usrsctp_socket(int domain, int type, int protocol, int (*receive_cb)(struct socket *sock, union sctp_sockstore addr, void *data, size_t datalen, struct sctp_rcvinfo, int flags, void *ulp_info), - int (*send_cb)(struct socket *sock, uint32_t sb_free), + int (*send_cb)(struct socket *sock, uint32_t sb_free, void *ulp_info), uint32_t sb_threshold, void *ulp_info); @@ -1032,6 +1033,9 @@ usrsctp_deregister_address(void *); int usrsctp_set_ulpinfo(struct socket *, void *); +int +usrsctp_get_ulpinfo(struct socket *, void **); + int usrsctp_set_upcall(struct socket *so, void (*upcall)(struct socket *, void *, int), @@ -1042,7 +1046,7 @@ usrsctp_get_events(struct socket *so); void -usrsctp_handle_timers(uint32_t delta); +usrsctp_handle_timers(uint32_t elapsed_milliseconds); #define SCTP_DUMP_OUTBOUND 1 #define SCTP_DUMP_INBOUND 0 diff --git a/TMessagesProj/jni/voip/CMakeLists.txt b/TMessagesProj/jni/voip/CMakeLists.txt index af1ff8ef2..24a55ba22 100644 --- a/TMessagesProj/jni/voip/CMakeLists.txt +++ b/TMessagesProj/jni/voip/CMakeLists.txt @@ -1,5 +1,6 @@ cmake_minimum_required(VERSION 3.6.0) +set(CMAKE_CXX_FLAGS "-std=c++17 -DANDROID -g") #tgvoip add_library(tgvoip STATIC @@ -289,9 +290,7 @@ add_library(tgcalls_tp STATIC third_party/usrsctplib/user_mbuf.c third_party/usrsctplib/user_recv_thread.c third_party/usrsctplib/user_socket.c - voip/webrtc/absl/base/dynamic_annotations.cc voip/webrtc/absl/base/internal/cycleclock.cc - voip/webrtc/absl/base/internal/exception_safety_testing.cc voip/webrtc/absl/base/internal/exponential_biased.cc voip/webrtc/absl/base/internal/low_level_alloc.cc voip/webrtc/absl/base/internal/periodic_sampler.cc @@ -326,8 +325,6 @@ add_library(tgcalls_tp STATIC voip/webrtc/absl/flags/internal/commandlineflag.cc voip/webrtc/absl/flags/internal/flag.cc voip/webrtc/absl/flags/internal/program_name.cc - voip/webrtc/absl/flags/internal/registry.cc - voip/webrtc/absl/flags/internal/type_erased.cc voip/webrtc/absl/flags/internal/usage.cc voip/webrtc/absl/flags/marshalling.cc voip/webrtc/absl/flags/parse.cc @@ -446,16 +443,17 @@ add_library(tgcalls STATIC voip/tgcalls/AudioDeviceHelper.cpp voip/tgcalls/SctpDataChannelProviderInterfaceImpl.cpp voip/tgcalls/TurnCustomizerImpl.cpp - voip/tgcalls/reference/InstanceImplReference.cpp voip/tgcalls/legacy/InstanceImplLegacy.cpp voip/tgcalls/group/GroupNetworkManager.cpp voip/tgcalls/group/GroupInstanceCustomImpl.cpp voip/tgcalls/group/GroupJoinPayloadInternal.cpp voip/tgcalls/group/AudioStreamingPart.cpp - voip/tgcalls/group/VideoStreamingPart.cpp + voip/tgcalls/group/AudioStreamingPartInternal.cpp + voip/tgcalls/group/AudioStreamingPartPersistentDecoder.cpp + voip/tgcalls/group/AVIOContextImpl.cpp voip/tgcalls/group/StreamingMediaContext.cpp - voip/tgcalls/third-party/json11.cpp - + voip/tgcalls/group/VideoStreamingPart.cpp + 
voip/webrtc/rtc_base/bitstream_reader.cc voip/webrtc/rtc_base/async_invoker.cc voip/webrtc/rtc_base/system_time.cc voip/webrtc/rtc_base/async_resolver.cc @@ -478,6 +476,7 @@ add_library(tgcalls STATIC voip/webrtc/rtc_base/boringssl_identity.cc voip/webrtc/rtc_base/experiments/alr_experiment.cc voip/webrtc/rtc_base/experiments/balanced_degradation_settings.cc + voip/webrtc/rtc_base/experiments/bandwidth_quality_scaler_settings.cc voip/webrtc/rtc_base/experiments/cpu_speed_experiment.cc voip/webrtc/rtc_base/experiments/encoder_info_settings.cc voip/webrtc/rtc_base/experiments/field_trial_list.cc @@ -490,10 +489,12 @@ add_library(tgcalls STATIC voip/webrtc/rtc_base/experiments/quality_rampup_experiment.cc voip/webrtc/rtc_base/experiments/quality_scaler_settings.cc voip/webrtc/rtc_base/experiments/quality_scaling_experiment.cc + voip/webrtc/rtc_base/experiments/quality_rampup_experiment.cc voip/webrtc/rtc_base/experiments/rate_control_settings.cc voip/webrtc/rtc_base/experiments/rtt_mult_experiment.cc voip/webrtc/rtc_base/experiments/stable_target_rate_experiment.cc voip/webrtc/rtc_base/experiments/struct_parameters_parser.cc + voip/webrtc/rtc_base/experiments/bandwidth_quality_scaler_settings.cc voip/webrtc/rtc_base/file_rotating_stream.cc voip/webrtc/rtc_base/helpers.cc voip/webrtc/rtc_base/http_common.cc @@ -583,6 +584,7 @@ add_library(tgcalls STATIC voip/webrtc/rtc_base/callback_list.cc voip/webrtc/rtc_base/deprecated/recursive_critical_section.cc voip/webrtc/rtc_base/internal/default_socket_server.cc + voip/webrtc/api/adaptation/resource.cc voip/webrtc/api/audio/audio_frame.cc voip/webrtc/api/audio/channel_layout.cc voip/webrtc/api/audio/echo_canceller3_config.cc @@ -629,8 +631,8 @@ add_library(tgcalls STATIC voip/webrtc/api/neteq/default_neteq_controller_factory.cc voip/webrtc/api/neteq/neteq.cc voip/webrtc/api/neteq/tick_timer.cc + voip/webrtc/api/numerics/samples_stats_counter.cc voip/webrtc/api/peer_connection_interface.cc - voip/webrtc/api/proxy.cc voip/webrtc/api/rtc_error.cc voip/webrtc/api/rtc_event_log/rtc_event.cc voip/webrtc/api/rtc_event_log/rtc_event_log.cc @@ -655,7 +657,10 @@ add_library(tgcalls STATIC voip/webrtc/api/video/video_adaptation_counters.cc voip/webrtc/api/video/video_frame_metadata.cc voip/webrtc/api/voip/voip_engine_factory.cc + voip/webrtc/api/video/i444_buffer.cc + voip/webrtc/api/video/rtp_video_frame_assembler.cc voip/webrtc/api/numerics/samples_stats_counter.cc + voip/webrtc/api/wrapping_async_dns_resolver.cc voip/webrtc/call/adaptation/adaptation_constraint.cc voip/webrtc/call/adaptation/broadcast_resource_listener.cc voip/webrtc/call/adaptation/degradation_preference_provider.cc @@ -721,6 +726,7 @@ add_library(tgcalls STATIC voip/webrtc/pc/jsep_ice_candidate.cc voip/webrtc/pc/jsep_session_description.cc voip/webrtc/pc/jsep_transport.cc + voip/webrtc/pc/jsep_transport_collection.cc voip/webrtc/pc/jsep_transport_controller.cc voip/webrtc/pc/local_audio_source.cc voip/webrtc/pc/media_protocol_names.cc @@ -757,6 +763,7 @@ add_library(tgcalls STATIC voip/webrtc/pc/video_rtp_track_source.cc voip/webrtc/pc/video_track.cc voip/webrtc/pc/video_track_source.cc + voip/webrtc/pc/video_track_source_proxy.cc voip/webrtc/pc/webrtc_sdp.cc voip/webrtc/pc/webrtc_session_description_factory.cc voip/webrtc/pc/connection_context.cc @@ -768,7 +775,6 @@ add_library(tgcalls STATIC voip/webrtc/sdk/android/src/jni/pc/add_ice_candidate_observer.cc voip/webrtc/media/base/adapted_video_track_source.cc voip/webrtc/media/base/codec.cc - voip/webrtc/media/base/h264_profile_level_id.cc 
voip/webrtc/media/base/media_channel.cc voip/webrtc/media/base/media_constants.cc voip/webrtc/media/base/media_engine.cc @@ -804,6 +810,7 @@ add_library(tgcalls STATIC voip/webrtc/system_wrappers/source/metrics.cc voip/webrtc/system_wrappers/source/rtp_to_ntp_estimator.cc voip/webrtc/system_wrappers/source/sleep.cc + voip/webrtc/system_wrappers/source/denormal_disabler.cc voip/webrtc/modules/audio_coding/acm2/acm_receiver.cc voip/webrtc/modules/audio_coding/acm2/acm_remixing.cc voip/webrtc/modules/audio_coding/acm2/acm_resampler.cc @@ -1007,6 +1014,9 @@ add_library(tgcalls STATIC voip/webrtc/modules/audio_coding/neteq/sync_buffer.cc voip/webrtc/modules/audio_coding/neteq/time_stretch.cc voip/webrtc/modules/audio_coding/neteq/timestamp_scaler.cc + voip/webrtc/modules/audio_coding/neteq/reorder_optimizer.cc + voip/webrtc/modules/audio_coding/neteq/underrun_optimizer.cc + voip/webrtc/modules/audio_coding/neteq/relative_arrival_delay_tracker.cc voip/webrtc/modules/audio_device/audio_device_buffer.cc voip/webrtc/modules/audio_device/audio_device_data_observer.cc voip/webrtc/modules/audio_device/audio_device_generic.cc @@ -1088,24 +1098,25 @@ add_library(tgcalls STATIC voip/webrtc/modules/audio_processing/agc/legacy/digital_agc.cc voip/webrtc/modules/audio_processing/agc/loudness_histogram.cc voip/webrtc/modules/audio_processing/agc/utility.cc - voip/webrtc/modules/audio_processing/agc2/adaptive_agc.cc + voip/webrtc/modules/audio_processing/agc/clipping_predictor.cc + voip/webrtc/modules/audio_processing/agc/clipping_predictor_evaluator.cc + voip/webrtc/modules/audio_processing/agc/clipping_predictor_level_buffer.cc + voip/webrtc/modules/audio_processing/agc/analog_gain_stats_reporter.cc + voip/webrtc/modules/audio_processing/agc2/adaptive_digital_gain_controller.cc + voip/webrtc/modules/audio_processing/agc2/vad_wrapper.cc voip/webrtc/modules/audio_processing/agc2/cpu_features.cc voip/webrtc/modules/audio_processing/agc2/adaptive_digital_gain_applier.cc voip/webrtc/modules/audio_processing/agc2/adaptive_mode_level_estimator.cc voip/webrtc/modules/audio_processing/agc2/agc2_testing_common.cc voip/webrtc/modules/audio_processing/agc2/biquad_filter.cc voip/webrtc/modules/audio_processing/agc2/compute_interpolated_gain_curve.cc - voip/webrtc/modules/audio_processing/agc2/down_sampler.cc voip/webrtc/modules/audio_processing/agc2/fixed_digital_level_estimator.cc voip/webrtc/modules/audio_processing/agc2/gain_applier.cc voip/webrtc/modules/audio_processing/agc2/interpolated_gain_curve.cc voip/webrtc/modules/audio_processing/agc2/limiter.cc voip/webrtc/modules/audio_processing/agc2/limiter_db_gain_curve.cc voip/webrtc/modules/audio_processing/agc2/noise_level_estimator.cc - voip/webrtc/modules/audio_processing/agc2/noise_spectrum_estimator.cc voip/webrtc/modules/audio_processing/agc2/saturation_protector.cc - voip/webrtc/modules/audio_processing/agc2/signal_classifier.cc - voip/webrtc/modules/audio_processing/agc2/vad_with_level.cc voip/webrtc/modules/audio_processing/agc2/vector_float_frame.cc voip/webrtc/modules/audio_processing/agc2/saturation_protector_buffer.cc voip/webrtc/modules/audio_processing/agc2/rnn_vad/auto_correlation.cc @@ -1133,8 +1144,6 @@ add_library(tgcalls STATIC voip/webrtc/modules/audio_processing/include/audio_frame_proxies.cc voip/webrtc/modules/audio_processing/include/audio_processing.cc voip/webrtc/modules/audio_processing/include/audio_processing_statistics.cc - voip/webrtc/modules/audio_processing/include/config.cc - voip/webrtc/modules/audio_processing/level_estimator.cc 
voip/webrtc/modules/audio_processing/logging/apm_data_dumper.cc voip/webrtc/modules/audio_processing/ns/fast_math.cc voip/webrtc/modules/audio_processing/ns/histograms.cc @@ -1172,7 +1181,6 @@ add_library(tgcalls STATIC voip/webrtc/modules/audio_processing/vad/vad_audio_proc.cc voip/webrtc/modules/audio_processing/vad/vad_circular_buffer.cc voip/webrtc/modules/audio_processing/vad/voice_activity_detector.cc - voip/webrtc/modules/audio_processing/voice_detection.cc voip/webrtc/modules/audio_processing/optionally_built_submodule_creators.cc voip/webrtc/modules/audio_processing/capture_levels_adjuster/capture_levels_adjuster.cc voip/webrtc/modules/audio_processing/capture_levels_adjuster/audio_samples_scaler.cc @@ -1202,6 +1210,8 @@ add_library(tgcalls STATIC voip/webrtc/modules/congestion_controller/goog_cc/send_side_bandwidth_estimation.cc voip/webrtc/modules/congestion_controller/goog_cc/trendline_estimator.cc voip/webrtc/modules/congestion_controller/goog_cc/inter_arrival_delta.cc + voip/webrtc/modules/congestion_controller/goog_cc/loss_based_bandwidth_estimation.cc + voip/webrtc/modules/congestion_controller/goog_cc/loss_based_bwe_v2.cc voip/webrtc/modules/pacing/bitrate_prober.cc voip/webrtc/modules/pacing/interval_budget.cc voip/webrtc/modules/pacing/paced_sender.cc @@ -1214,7 +1224,6 @@ add_library(tgcalls STATIC voip/webrtc/modules/rtp_rtcp/source/absolute_capture_time_interpolator.cc voip/webrtc/modules/rtp_rtcp/source/capture_clock_offset_updater.cc voip/webrtc/modules/rtp_rtcp/source/active_decode_targets_helper.cc - voip/webrtc/modules/rtp_rtcp/source/absolute_capture_time_receiver.cc voip/webrtc/modules/rtp_rtcp/source/absolute_capture_time_sender.cc voip/webrtc/modules/rtp_rtcp/source/create_video_rtp_depacketizer.cc voip/webrtc/modules/rtp_rtcp/source/dtmf_queue.cc @@ -1288,7 +1297,6 @@ add_library(tgcalls STATIC voip/webrtc/modules/rtp_rtcp/source/rtp_sender_video.cc voip/webrtc/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.cc voip/webrtc/modules/rtp_rtcp/source/rtp_sequence_number_map.cc - voip/webrtc/modules/rtp_rtcp/source/rtp_utility.cc voip/webrtc/modules/rtp_rtcp/source/rtp_video_header.cc voip/webrtc/modules/rtp_rtcp/source/source_tracker.cc voip/webrtc/modules/rtp_rtcp/source/time_util.cc @@ -1308,6 +1316,7 @@ add_library(tgcalls STATIC voip/webrtc/modules/rtp_rtcp/source/deprecated/deprecated_rtp_sender_egress.cc voip/webrtc/modules/rtp_rtcp/source/rtp_video_layers_allocation_extension.cc voip/webrtc/modules/rtp_rtcp/source/packet_sequencer.cc + voip/webrtc/modules/rtp_rtcp/source/rtp_util.cc voip/webrtc/modules/utility/source/helpers_android.cc voip/webrtc/modules/utility/source/jvm_android.cc voip/webrtc/modules/utility/source/process_thread_impl.cc @@ -1318,7 +1327,7 @@ add_library(tgcalls STATIC voip/webrtc/modules/video_capture/video_capture_impl.cc voip/webrtc/modules/video_coding/codec_timer.cc voip/webrtc/modules/video_coding/codecs/av1/libaom_av1_decoder_absent.cc - voip/webrtc/modules/video_coding/codecs/av1/libaom_av1_encoder_absent.cc + voip/webrtc/modules/video_coding/codecs/av1/libaom_av1_encoder_supported.cc voip/webrtc/modules/video_coding/codecs/h264/h264.cc voip/webrtc/modules/video_coding/codecs/h264/h264_color_space.cc voip/webrtc/modules/video_coding/codecs/h264/h264_decoder_impl.cc @@ -1360,7 +1369,6 @@ add_library(tgcalls STATIC voip/webrtc/modules/video_coding/utility/decoded_frames_history.cc voip/webrtc/modules/video_coding/utility/qp_parser.cc voip/webrtc/modules/video_coding/utility/frame_dropper.cc - 
voip/webrtc/modules/video_coding/utility/framerate_controller.cc voip/webrtc/modules/video_coding/utility/ivf_file_reader.cc voip/webrtc/modules/video_coding/utility/ivf_file_writer.cc voip/webrtc/modules/video_coding/utility/quality_scaler.cc @@ -1368,6 +1376,8 @@ add_library(tgcalls STATIC voip/webrtc/modules/video_coding/utility/simulcast_utility.cc voip/webrtc/modules/video_coding/utility/vp8_header_parser.cc voip/webrtc/modules/video_coding/utility/vp9_uncompressed_header_parser.cc + voip/webrtc/modules/video_coding/utility/bandwidth_quality_scaler.cc + voip/webrtc/modules/video_coding/utility/framerate_controller_deprecated.cc voip/webrtc/modules/video_coding/video_codec_initializer.cc voip/webrtc/modules/video_coding/video_coding_defines.cc voip/webrtc/modules/video_coding/video_coding_impl.cc @@ -1397,6 +1407,10 @@ add_library(tgcalls STATIC voip/webrtc/modules/video_coding/rtp_vp9_ref_finder.cc voip/webrtc/modules/video_coding/rtp_generic_ref_finder.cc voip/webrtc/modules/video_coding/codecs/av1/av1_svc_config.cc + voip/webrtc/modules/video_coding/nack_requester.cc + voip/webrtc/modules/video_coding/frame_buffer3.cc + voip/webrtc/modules/video_coding/frame_helpers.cc + voip/webrtc/modules/video_coding/h264_packet_buffer.cc voip/webrtc/modules/video_processing/util/denoiser_filter.cc voip/webrtc/modules/video_processing/util/denoiser_filter_c.cc voip/webrtc/modules/video_processing/util/noise_estimation.cc @@ -1496,6 +1510,7 @@ add_library(tgcalls STATIC voip/webrtc/common_video/h264/sps_vui_rewriter.cc voip/webrtc/common_video/h265/h265_bitstream_parser.cc voip/webrtc/common_video/h265/h265_common.cc + voip/webrtc/common_video/h265/legacy_bit_buffer.cc voip/webrtc/common_video/h265/h265_pps_parser.cc voip/webrtc/common_video/h265/h265_sps_parser.cc voip/webrtc/common_video/h265/h265_vps_parser.cc @@ -1504,6 +1519,7 @@ add_library(tgcalls STATIC voip/webrtc/common_video/video_frame_buffer.cc voip/webrtc/common_video/video_render_frames.cc voip/webrtc/common_video/video_frame_buffer_pool.cc + voip/webrtc/common_video/framerate_controller.cc voip/webrtc/p2p/base/async_stun_tcp_socket.cc voip/webrtc/p2p/base/basic_async_resolver_factory.cc voip/webrtc/p2p/base/basic_ice_controller.cc @@ -1536,9 +1552,7 @@ add_library(tgcalls STATIC voip/webrtc/p2p/client/basic_port_allocator.cc voip/webrtc/p2p/client/turn_port_factory.cc voip/webrtc/p2p/stunprober/stun_prober.cc - voip/webrtc/video/adaptation/quality_rampup_experiment_helper.cc voip/webrtc/modules/video_coding/deprecated/nack_module.cc - voip/webrtc/modules/video_coding/nack_module2.cc voip/webrtc/modules/async_audio_processing/async_audio_processing.cc voip/webrtc/logging/rtc_event_log/encoder/blob_encoding.cc voip/webrtc/logging/rtc_event_log/encoder/delta_encoding.cc @@ -1568,6 +1582,10 @@ add_library(tgcalls STATIC voip/webrtc/logging/rtc_event_log/events/rtc_event_rtp_packet_outgoing.cc voip/webrtc/logging/rtc_event_log/events/rtc_event_video_receive_stream_config.cc voip/webrtc/logging/rtc_event_log/events/rtc_event_video_send_stream_config.cc + voip/webrtc/logging/rtc_event_log/events/fixed_length_encoding_parameters_v3.cc + voip/webrtc/logging/rtc_event_log/events/rtc_event_field_encoding.cc + voip/webrtc/logging/rtc_event_log/events/rtc_event_field_encoding_parser.cc + voip/webrtc/logging/rtc_event_log/events/rtc_event_field_extraction.cc voip/webrtc/logging/rtc_event_log/fake_rtc_event_log.cc voip/webrtc/logging/rtc_event_log/fake_rtc_event_log_factory.cc voip/webrtc/logging/rtc_event_log/ice_logger.cc @@ -1581,6 +1599,8 @@ 
add_library(tgcalls STATIC voip/webrtc/video/adaptation/balanced_constraint.cc voip/webrtc/video/adaptation/bitrate_constraint.cc voip/webrtc/video/adaptation/pixel_limit_resource.cc + voip/webrtc/video/adaptation/quality_rampup_experiment_helper.cc + voip/webrtc/video/adaptation/bandwidth_quality_scaler_resource.cc voip/webrtc/video/buffered_frame_decryptor.cc voip/webrtc/video/call_stats.cc voip/webrtc/video/encoder_bitrate_adjuster.cc @@ -1601,7 +1621,6 @@ add_library(tgcalls STATIC voip/webrtc/video/stream_synchronization.cc voip/webrtc/video/transport_adapter.cc voip/webrtc/video/video_quality_observer.cc - voip/webrtc/video/video_receive_stream.cc voip/webrtc/video/video_send_stream.cc voip/webrtc/video/video_send_stream_impl.cc voip/webrtc/video/video_source_sink_controller.cc @@ -1616,6 +1635,12 @@ add_library(tgcalls STATIC voip/webrtc/video/receive_statistics_proxy2.cc voip/webrtc/video/call_stats2.cc voip/webrtc/video/alignment_adjuster.cc + voip/webrtc/video/frame_buffer_proxy.cc + voip/webrtc/video/decode_synchronizer.cc + voip/webrtc/video/frame_cadence_adapter.cc + voip/webrtc/video/frame_decode_timing.cc + voip/webrtc/video/task_queue_frame_decode_scheduler.cc + voip/webrtc/video/video_receive_stream_timeout_tracker.cc voip/webrtc/audio/audio_level.cc voip/webrtc/audio/audio_receive_stream.cc voip/webrtc/audio/audio_send_stream.cc @@ -1761,7 +1786,7 @@ add_library(voipandroid STATIC voip/webrtc/sdk/android/native_api/video/wrapper.cc voip/webrtc/sdk/android/native_api/network_monitor/network_monitor.cc voip/webrtc/sdk/android/src/jni/android_histogram.cc - voip/webrtc/sdk/android/src/jni/av1_codec.cc + voip/webrtc/sdk/android/src/jni/libaom_av1_codec.cc voip/webrtc/sdk/android/src/jni/egl_base_10_impl.cc voip/webrtc/sdk/android/src/jni/android_metrics.cc voip/webrtc/sdk/android/src/jni/android_network_monitor.cc diff --git a/TMessagesProj/jni/voip/libtgvoip/EchoCanceller.cpp b/TMessagesProj/jni/voip/libtgvoip/EchoCanceller.cpp index acd184ccc..11a9f4908 100755 --- a/TMessagesProj/jni/voip/libtgvoip/EchoCanceller.cpp +++ b/TMessagesProj/jni/voip/libtgvoip/EchoCanceller.cpp @@ -27,9 +27,7 @@ EchoCanceller::EchoCanceller(bool enableAEC, bool enableNS, bool enableAGC){ this->enableNS=enableNS; isOn=true; - webrtc::Config extraConfig; - - apm=webrtc::AudioProcessingBuilder().Create(extraConfig); + apm=webrtc::AudioProcessingBuilder().Create(); webrtc::AudioProcessing::Config config; config.echo_canceller.enabled = enableAEC; @@ -70,7 +68,6 @@ EchoCanceller::EchoCanceller(bool enableAEC, bool enableNS, bool enableAGC){ config.gain_controller1.enable_limiter = ServerConfig::GetSharedInstance()->GetBoolean("webrtc_agc_enable_limiter", true); config.gain_controller1.compression_gain_db = ServerConfig::GetSharedInstance()->GetInt("webrtc_agc_compression_gain", 20); } - config.voice_detection.enabled = true; apm->ApplyConfig(config); @@ -126,10 +123,8 @@ void EchoCanceller::RunBufferFarendThread() { frame.sample_rate_hz_ = 48000; frame.samples_per_channel_ = 480; - webrtc::StreamConfig input_config(frame.sample_rate_hz_, frame.num_channels_, - /*has_keyboard=*/false); - webrtc::StreamConfig output_config(frame.sample_rate_hz_, frame.num_channels_, - /*has_keyboard=*/false); + webrtc::StreamConfig input_config(frame.sample_rate_hz_, frame.num_channels_); + webrtc::StreamConfig output_config(frame.sample_rate_hz_, frame.num_channels_); while (running) { int16_t *samplesIn = farendQueue->GetBlocking(); @@ -159,10 +154,8 @@ void EchoCanceller::ProcessInput(int16_t* inOut, size_t 
numSamples, bool& hasVoi int delay = audio::AudioInput::GetEstimatedDelay() + audio::AudioOutput::GetEstimatedDelay(); assert(numSamples == 960); - webrtc::StreamConfig input_config(audioFrame->sample_rate_hz_, audioFrame->num_channels_, - /*has_keyboard=*/false); - webrtc::StreamConfig output_config(audioFrame->sample_rate_hz_, audioFrame->num_channels_, - /*has_keyboard=*/false); + webrtc::StreamConfig input_config(audioFrame->sample_rate_hz_, audioFrame->num_channels_); + webrtc::StreamConfig output_config(audioFrame->sample_rate_hz_, audioFrame->num_channels_); memcpy(audioFrame->mutable_data(), inOut, 480 * 2); if (enableAEC) @@ -201,7 +194,7 @@ void EchoCanceller::SetVoiceDetectionEnabled(bool enabled) { enableVAD = enabled; #ifndef TGVOIP_NO_DSP auto config = apm->GetConfig(); - config.voice_detection.enabled = enabled; + // config.voice_detection.enabled = enabled; apm->ApplyConfig(config); #endif } diff --git a/TMessagesProj/jni/voip/org_telegram_messenger_voip_Instance.cpp b/TMessagesProj/jni/voip/org_telegram_messenger_voip_Instance.cpp index 38633b2e5..d89b75ea1 100644 --- a/TMessagesProj/jni/voip/org_telegram_messenger_voip_Instance.cpp +++ b/TMessagesProj/jni/voip/org_telegram_messenger_voip_Instance.cpp @@ -19,7 +19,6 @@ #include "pc/video_track.h" #include "legacy/InstanceImplLegacy.h" #include "InstanceImpl.h" -#include "reference/InstanceImplReference.h" #include "libtgvoip/os/android/AudioOutputOpenSLES.h" #include "libtgvoip/os/android/AudioInputOpenSLES.h" #include "libtgvoip/os/android/JNIUtilities.h" @@ -29,7 +28,6 @@ using namespace tgcalls; const auto RegisterTag = Register(); const auto RegisterTagLegacy = Register(); -const auto RegisterTagReference = tgcalls::Register(); jclass TrafficStatsClass; jclass FingerprintClass; @@ -137,6 +135,19 @@ private: VideoChannelDescription::Quality _quality; }; +class RequestCurrentTimeTaskJava : public BroadcastPartTask { +public: + RequestCurrentTimeTaskJava(std::function callback) : + _callback(std::move(callback)) { + } + + std::function _callback; +private: + void cancel() override { + + } +}; + class JavaObject { private: JNIEnv *env; @@ -465,6 +476,14 @@ JNIEXPORT jlong JNICALL Java_org_telegram_messenger_voip_NativeInstance_makeGrou }); return task; }; + descriptor.requestCurrentTime = [platformContext](std::function callback) -> std::shared_ptr { + std::shared_ptr task = std::make_shared(callback); + tgvoip::jni::DoWithJNI([platformContext, task](JNIEnv *env) { + jobject globalRef = ((AndroidContext *) platformContext.get())->getJavaInstance(); + env->CallVoidMethod(globalRef, env->GetMethodID(NativeInstanceClass, "requestCurrentTime", "(J)V"), (jlong) task.get()); + }); + return task; + }; } auto *holder = new InstanceHolder; @@ -480,17 +499,18 @@ JNIEXPORT void JNICALL Java_org_telegram_messenger_voip_NativeInstance_setJoinRe if (instance->groupNativeInstance == nullptr) { return; } - instance->groupNativeInstance->setConnectionMode(GroupConnectionMode::GroupConnectionModeRtc, true); + instance->groupNativeInstance->setConnectionMode(GroupConnectionMode::GroupConnectionModeRtc, true, true); instance->groupNativeInstance->setJoinResponsePayload(tgvoip::jni::JavaStringToStdString(env, payload)); } extern "C" -JNIEXPORT void JNICALL Java_org_telegram_messenger_voip_NativeInstance_prepareForStream(JNIEnv *env, jobject obj) { +JNIEXPORT void JNICALL Java_org_telegram_messenger_voip_NativeInstance_prepareForStream(JNIEnv *env, jobject obj, jboolean isRtmpStream) { InstanceHolder *instance = getInstanceHolder(env, obj); if 
(instance->groupNativeInstance == nullptr) { return; } - instance->groupNativeInstance->setConnectionMode(GroupConnectionMode::GroupConnectionModeBroadcast, true); + instance->groupNativeInstance->setConnectionMode(GroupConnectionMode::GroupConnectionModeBroadcast, true, + isRtmpStream); } void onEmitJoinPayload(const std::shared_ptr& platformContext, const GroupJoinPayload& payload) { @@ -506,7 +526,7 @@ JNIEXPORT void JNICALL Java_org_telegram_messenger_voip_NativeInstance_resetGrou return; } if (set) { - instance->groupNativeInstance->setConnectionMode(GroupConnectionMode::GroupConnectionModeNone, !disconnect); + instance->groupNativeInstance->setConnectionMode(GroupConnectionMode::GroupConnectionModeNone, !disconnect, true); } std::shared_ptr platformContext = instance->_platformContext; instance->groupNativeInstance->emitJoinPayload([platformContext](const GroupJoinPayload& payload) { @@ -1066,4 +1086,14 @@ JNIEXPORT void JNICALL Java_org_telegram_messenger_voip_NativeInstance_onSignali env->ReleaseByteArrayElements(value, (jbyte *) valueBytes, JNI_ABORT); } +extern "C" +JNIEXPORT void JNICALL Java_org_telegram_messenger_voip_NativeInstance_onRequestTimeComplete(JNIEnv *env, jobject obj, jlong taskPtr, jlong currentTime) { + InstanceHolder *instance = getInstanceHolder(env, obj); + if (instance->groupNativeInstance == nullptr) { + return; + } + auto task = reinterpret_cast(taskPtr); + task->_callback(currentTime); +} + } \ No newline at end of file diff --git a/TMessagesProj/jni/voip/tgcalls/CodecSelectHelper.cpp b/TMessagesProj/jni/voip/tgcalls/CodecSelectHelper.cpp index ebf30427f..af00efdd8 100644 --- a/TMessagesProj/jni/voip/tgcalls/CodecSelectHelper.cpp +++ b/TMessagesProj/jni/voip/tgcalls/CodecSelectHelper.cpp @@ -106,11 +106,7 @@ std::vector::const_iterator FindEqualFormat( const std::vector &list, const VideoFormat &format) { return std::find_if(list.begin(), list.end(), [&](const VideoFormat &other) { - return cricket::IsSameCodec( - format.name, - format.parameters, - other.name, - other.parameters); + return format.IsSameCodec(other); }); } diff --git a/TMessagesProj/jni/voip/tgcalls/EncryptedConnection.cpp b/TMessagesProj/jni/voip/tgcalls/EncryptedConnection.cpp index 61f561ca5..15f2e86d1 100644 --- a/TMessagesProj/jni/voip/tgcalls/EncryptedConnection.cpp +++ b/TMessagesProj/jni/voip/tgcalls/EncryptedConnection.cpp @@ -63,7 +63,7 @@ bool ConstTimeIsDifferent(const void *a, const void *b, size_t size) { auto cb = reinterpret_cast(b); volatile auto different = false; for (const auto ce = ca + size; ca != ce; ++ca, ++cb) { - different |= (*ca != *cb); + different = different | (*ca != *cb); } return different; } diff --git a/TMessagesProj/jni/voip/tgcalls/FakeVideoTrackSource.cpp b/TMessagesProj/jni/voip/tgcalls/FakeVideoTrackSource.cpp index 67c827f19..a740308f7 100644 --- a/TMessagesProj/jni/voip/tgcalls/FakeVideoTrackSource.cpp +++ b/TMessagesProj/jni/voip/tgcalls/FakeVideoTrackSource.cpp @@ -121,13 +121,13 @@ class FakeVideoSource : public rtc::VideoSourceInterface { } using VideoFrameT = webrtc::VideoFrame; void AddOrUpdateSink(rtc::VideoSinkInterface *sink, const rtc::VideoSinkWants &wants) override { - RTC_LOG(WARNING) << "ADD"; + RTC_LOG(LS_WARNING) << "ADD"; data_->broadcaster_.AddOrUpdateSink(sink, wants); } // RemoveSink must guarantee that at the time the method returns, // there is no current and no future calls to VideoSinkInterface::OnFrame. 
void RemoveSink(rtc::VideoSinkInterface *sink) { - RTC_LOG(WARNING) << "REMOVE"; + RTC_LOG(LS_WARNING) << "REMOVE"; data_->broadcaster_.RemoveSink(sink); } diff --git a/TMessagesProj/jni/voip/tgcalls/Manager.cpp b/TMessagesProj/jni/voip/tgcalls/Manager.cpp index 1244e82a2..5069b58e3 100644 --- a/TMessagesProj/jni/voip/tgcalls/Manager.cpp +++ b/TMessagesProj/jni/voip/tgcalls/Manager.cpp @@ -62,7 +62,7 @@ void dumpStatsLog(const FilePath &path, const CallStats &stats) { } // namespace -bool Manager::ResolvedNetworkStatus::operator==(const ResolvedNetworkStatus &rhs) { +bool Manager::ResolvedNetworkStatus::operator==(const ResolvedNetworkStatus &rhs) const { if (rhs.isLowCost != isLowCost) { return false; } @@ -72,7 +72,7 @@ bool Manager::ResolvedNetworkStatus::operator==(const ResolvedNetworkStatus &rhs return true; } -bool Manager::ResolvedNetworkStatus::operator!=(const ResolvedNetworkStatus &rhs) { +bool Manager::ResolvedNetworkStatus::operator!=(const ResolvedNetworkStatus &rhs) const { return !(*this == rhs); } @@ -139,9 +139,9 @@ void Manager::sendSignalingAsync(int delayMs, int cause) { } }; if (delayMs) { - _thread->PostDelayedTask(RTC_FROM_HERE, std::move(task), delayMs); + _thread->PostDelayedTask(std::move(task), delayMs); } else { - _thread->PostTask(RTC_FROM_HERE, std::move(task)); + _thread->PostTask(std::move(task)); } } @@ -149,7 +149,7 @@ void Manager::start() { const auto weak = std::weak_ptr(shared_from_this()); const auto thread = _thread; const auto sendSignalingMessage = [=](Message &&message) { - thread->PostTask(RTC_FROM_HERE, [=, message = std::move(message)]() mutable { + thread->PostTask([=, message = std::move(message)]() mutable { const auto strong = weak.lock(); if (!strong) { return; @@ -167,7 +167,7 @@ void Manager::start() { rtcServers, std::move(proxy), [=](const NetworkManager::State &state) { - thread->PostTask(RTC_FROM_HERE, [=] { + thread->PostTask([=] { const auto strong = weak.lock(); if (!strong) { return; @@ -200,7 +200,7 @@ void Manager::start() { }); }, [=](DecryptedMessage &&message) { - thread->PostTask(RTC_FROM_HERE, [=, message = std::move(message)]() mutable { + thread->PostTask([=, message = std::move(message)]() mutable { if (const auto strong = weak.lock()) { strong->receiveMessage(std::move(message)); } @@ -216,9 +216,9 @@ void Manager::start() { } }; if (delayMs) { - thread->PostDelayedTask(RTC_FROM_HERE, task, delayMs); + thread->PostDelayedTask(task, delayMs); } else { - thread->PostTask(RTC_FROM_HERE, task); + thread->PostTask(task); } }); })); @@ -232,7 +232,7 @@ void Manager::start() { videoCapture, sendSignalingMessage, [=](Message &&message) { - thread->PostTask(RTC_FROM_HERE, [=, message = std::move(message)]() mutable { + thread->PostTask([=, message = std::move(message)]() mutable { const auto strong = weak.lock(); if (!strong) { return; @@ -334,7 +334,7 @@ void Manager::setMuteOutgoingAudio(bool mute) { }); } -void Manager::setIncomingVideoOutput(std::shared_ptr> sink) { +void Manager::setIncomingVideoOutput(std::weak_ptr> sink) { _mediaManager->perform(RTC_FROM_HERE, [sink](MediaManager *mediaManager) { mediaManager->setIncomingVideoOutput(sink); }); @@ -362,7 +362,7 @@ void Manager::getNetworkStats(std::function comp CallStats callStats; networkManager->fillCallStats(callStats); - thread->PostTask(RTC_FROM_HERE, [weak, networkStats, completion = std::move(completion), callStats = std::move(callStats), statsLogPath = statsLogPath] { + thread->PostTask([weak, networkStats, completion = std::move(completion), callStats = 
std::move(callStats), statsLogPath = statsLogPath] { const auto strong = weak.lock(); if (!strong) { return; diff --git a/TMessagesProj/jni/voip/tgcalls/Manager.h b/TMessagesProj/jni/voip/tgcalls/Manager.h index ceab976ad..a4a4a8f6d 100644 --- a/TMessagesProj/jni/voip/tgcalls/Manager.h +++ b/TMessagesProj/jni/voip/tgcalls/Manager.h @@ -16,8 +16,8 @@ private: bool isLowCost = false; bool isLowDataRequested = false; - bool operator==(const ResolvedNetworkStatus &rhs); - bool operator!=(const ResolvedNetworkStatus &rhs); + bool operator==(const ResolvedNetworkStatus &rhs) const; + bool operator!=(const ResolvedNetworkStatus &rhs) const; }; public: @@ -32,7 +32,7 @@ public: void sendVideoDeviceUpdated(); void setRequestedVideoAspect(float aspect); void setMuteOutgoingAudio(bool mute); - void setIncomingVideoOutput(std::shared_ptr> sink); + void setIncomingVideoOutput(std::weak_ptr> sink); void setIsLowBatteryLevel(bool isLowBatteryLevel); void setIsLocalNetworkLowCost(bool isLocalNetworkLowCost); void getNetworkStats(std::function completion); diff --git a/TMessagesProj/jni/voip/tgcalls/MediaManager.cpp b/TMessagesProj/jni/voip/tgcalls/MediaManager.cpp index cd03d3b63..a1071f47e 100644 --- a/TMessagesProj/jni/voip/tgcalls/MediaManager.cpp +++ b/TMessagesProj/jni/voip/tgcalls/MediaManager.cpp @@ -18,11 +18,13 @@ #include "system_wrappers/include/field_trial.h" #include "api/video/builtin_video_bitrate_allocator_factory.h" #include "call/call.h" -#include "modules/rtp_rtcp/source/rtp_utility.h" +#include "modules/rtp_rtcp/source/rtp_util.h" #include "api/call/audio_sink.h" #include "modules/audio_processing/audio_buffer.h" #include "modules/audio_device/include/audio_device_factory.h" - +#ifdef WEBRTC_IOS +#include "platform/darwin/iOS/tgcalls_audio_device_module_ios.h" +#endif namespace tgcalls { namespace { @@ -163,30 +165,30 @@ public: } virtual void OnFrame(const webrtc::VideoFrame& frame) override { - if (_impl) { + if (const auto strong = _impl.lock()) { if (_rewriteRotation) { webrtc::VideoFrame updatedFrame = frame; //updatedFrame.set_rotation(webrtc::VideoRotation::kVideoRotation_90); - _impl->OnFrame(updatedFrame); + strong->OnFrame(updatedFrame); } else { - _impl->OnFrame(frame); + strong->OnFrame(frame); } } } virtual void OnDiscardedFrame() override { - if (_impl) { - _impl->OnDiscardedFrame(); + if (const auto strong = _impl.lock()) { + strong->OnDiscardedFrame(); } } - void setSink(std::shared_ptr> impl) { + void setSink(std::weak_ptr> impl) { _impl = impl; } private: bool _rewriteRotation = false; - std::shared_ptr> _impl; + std::weak_ptr> _impl; }; @@ -312,7 +314,7 @@ _platformContext(platformContext) { webrtc::AudioProcessingBuilder builder; std::unique_ptr audioProcessor = std::make_unique([this](float level) { - this->_thread->PostTask(RTC_FROM_HERE, [this, level](){ + this->_thread->PostTask([this, level](){ auto strong = this; strong->_currentMyAudioLevel = level; }); @@ -405,9 +407,13 @@ _platformContext(platformContext) { rtc::scoped_refptr MediaManager::createAudioDeviceModule() { const auto create = [&](webrtc::AudioDeviceModule::AudioLayer layer) { +#ifdef WEBRTC_IOS + return rtc::make_ref_counted(false, false); +#else return webrtc::AudioDeviceModule::Create( layer, _taskQueueFactory.get()); +#endif }; const auto check = [&](const rtc::scoped_refptr &result) { return (result && result->Init() == 0) ? 
result : nullptr; @@ -430,7 +436,7 @@ void MediaManager::start() { // Here we hope that thread outlives the sink rtc::Thread *thread = _thread; std::unique_ptr incomingSink(new AudioTrackSinkInterfaceImpl([weak, thread](float level) { - thread->PostTask(RTC_FROM_HERE, [weak, level] { + thread->PostTask([weak, level] { if (const auto strong = weak.lock()) { strong->_currentAudioLevel = level; } @@ -539,7 +545,7 @@ void MediaManager::sendOutgoingMediaStateMessage() { void MediaManager::beginStatsTimer(int timeoutMs) { const auto weak = std::weak_ptr(shared_from_this()); - _thread->PostDelayedTask(RTC_FROM_HERE, [weak]() { + _thread->PostDelayedTask([weak]() { auto strong = weak.lock(); if (!strong) { return; @@ -550,7 +556,7 @@ void MediaManager::beginStatsTimer(int timeoutMs) { void MediaManager::beginLevelsTimer(int timeoutMs) { const auto weak = std::weak_ptr(shared_from_this()); - _thread->PostDelayedTask(RTC_FROM_HERE, [weak]() { + _thread->PostDelayedTask([weak]() { auto strong = weak.lock(); if (!strong) { return; @@ -647,7 +653,7 @@ void MediaManager::setSendVideo(std::shared_ptr videoCapt const auto object = GetVideoCaptureAssumingSameThread(_videoCapture.get()); _isScreenCapture = object->isScreenCapture(); object->setStateUpdated([=](VideoState state) { - thread->PostTask(RTC_FROM_HERE, [=] { + thread->PostTask([=] { if (const auto strong = weak.lock()) { strong->setOutgoingVideoState(state); } @@ -908,28 +914,31 @@ void MediaManager::setOutgoingVideoState(VideoState state) { sendOutgoingMediaStateMessage(); } -void MediaManager::setIncomingVideoOutput(std::shared_ptr> sink) { +void MediaManager::setIncomingVideoOutput(std::weak_ptr> sink) { _incomingVideoSinkProxy->setSink(sink); } -static bool IsRtcp(const uint8_t* packet, size_t length) { - webrtc::RtpUtility::RtpHeaderParser rtp_parser(packet, length); - return rtp_parser.RTCP(); -} - void MediaManager::receiveMessage(DecryptedMessage &&message) { const auto data = &message.message.data; if (const auto formats = absl::get_if(data)) { setPeerVideoFormats(std::move(*formats)); } else if (const auto audio = absl::get_if(data)) { - if (IsRtcp(audio->data.data(), audio->data.size())) { + if (webrtc::IsRtcpPacket(audio->data)) { RTC_LOG(LS_VERBOSE) << "Deliver audio RTCP"; } - _call->Receiver()->DeliverPacket(webrtc::MediaType::AUDIO, audio->data, -1); + if (webrtc::IsRtcpPacket(audio->data)) { + _call->Receiver()->DeliverPacket(webrtc::MediaType::ANY, audio->data, -1); + } else { + _call->Receiver()->DeliverPacket(webrtc::MediaType::AUDIO, audio->data, -1); + } } else if (const auto video = absl::get_if(data)) { if (_videoChannel) { if (_readyToReceiveVideo) { - _call->Receiver()->DeliverPacket(webrtc::MediaType::VIDEO, video->data, -1); + if (webrtc::IsRtcpPacket(video->data)) { + _call->Receiver()->DeliverPacket(webrtc::MediaType::ANY, video->data, -1); + } else { + _call->Receiver()->DeliverPacket(webrtc::MediaType::VIDEO, video->data, -1); + } } else { // maybe we need to queue packets for some time? 
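// A minimal illustrative sketch (an assumption, not code taken from this patch) of the
// routing rule the MediaManager::receiveMessage hunk above applies: each received packet
// is probed with webrtc::IsRtcpPacket(); RTCP is delivered as webrtc::MediaType::ANY,
// since an RTCP compound packet is not bound to one media type, while plain RTP keeps its
// AUDIO or VIDEO type. It assumes only the WebRTC calls already used in this hunk
// (modules/rtp_rtcp/source/rtp_util.h and webrtc::Call::Receiver()->DeliverPacket()).

#include "api/media_types.h"
#include "call/call.h"
#include "modules/rtp_rtcp/source/rtp_util.h"
#include "rtc_base/copy_on_write_buffer.h"

namespace {

// Deliver one received packet, letting the call object demultiplex RTCP internally.
void DeliverRtpOrRtcp(webrtc::Call *call,
                      webrtc::MediaType mediaType,
                      const rtc::CopyOnWriteBuffer &packet) {
    const auto type = webrtc::IsRtcpPacket(packet) ? webrtc::MediaType::ANY : mediaType;
    call->Receiver()->DeliverPacket(type, packet, -1);
}

}  // namespace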
} diff --git a/TMessagesProj/jni/voip/tgcalls/MediaManager.h b/TMessagesProj/jni/voip/tgcalls/MediaManager.h index baaec2729..f4e321506 100644 --- a/TMessagesProj/jni/voip/tgcalls/MediaManager.h +++ b/TMessagesProj/jni/voip/tgcalls/MediaManager.h @@ -61,7 +61,7 @@ public: void sendVideoDeviceUpdated(); void setRequestedVideoAspect(float aspect); void setMuteOutgoingAudio(bool mute); - void setIncomingVideoOutput(std::shared_ptr> sink); + void setIncomingVideoOutput(std::weak_ptr> sink); void receiveMessage(DecryptedMessage &&message); void remoteVideoStateUpdated(VideoState videoState); void setNetworkParameters(bool isLowCost, bool isDataSavingActive); diff --git a/TMessagesProj/jni/voip/tgcalls/NetworkManager.cpp b/TMessagesProj/jni/voip/tgcalls/NetworkManager.cpp index f84fa54dc..dccee7adc 100644 --- a/TMessagesProj/jni/voip/tgcalls/NetworkManager.cpp +++ b/TMessagesProj/jni/voip/tgcalls/NetworkManager.cpp @@ -107,7 +107,7 @@ NetworkManager::~NetworkManager() { } void NetworkManager::start() { - _socketFactory.reset(new rtc::BasicPacketSocketFactory(_thread)); + _socketFactory.reset(new rtc::BasicPacketSocketFactory(_thread->socketserver())); _networkManager = std::make_unique(_networkMonitorFactory.get()); @@ -274,7 +274,7 @@ void NetworkManager::logCurrentNetworkState() { void NetworkManager::checkConnectionTimeout() { const auto weak = std::weak_ptr(shared_from_this()); - _thread->PostDelayedTask(RTC_FROM_HERE, [weak]() { + _thread->PostDelayedTask([weak]() { auto strong = weak.lock(); if (!strong) { return; diff --git a/TMessagesProj/jni/voip/tgcalls/SctpDataChannelProviderInterfaceImpl.cpp b/TMessagesProj/jni/voip/tgcalls/SctpDataChannelProviderInterfaceImpl.cpp index 27f7d063d..fea07b8a1 100644 --- a/TMessagesProj/jni/voip/tgcalls/SctpDataChannelProviderInterfaceImpl.cpp +++ b/TMessagesProj/jni/voip/tgcalls/SctpDataChannelProviderInterfaceImpl.cpp @@ -103,7 +103,7 @@ void SctpDataChannelProviderInterfaceImpl::sctpReadyToSendData() { _dataChannel->OnTransportReady(true); } -void SctpDataChannelProviderInterfaceImpl::sctpClosedAbruptly() { +void SctpDataChannelProviderInterfaceImpl::sctpClosedAbruptly(webrtc::RTCError error) { assert(_threads->getNetworkThread()->IsCurrent()); if (_onTerminated) { @@ -117,10 +117,15 @@ void SctpDataChannelProviderInterfaceImpl::sctpDataReceived(const cricket::Recei _dataChannel->OnDataReceived(params, buffer); } -bool SctpDataChannelProviderInterfaceImpl::SendData(int sid, const webrtc::SendDataParams& params, const rtc::CopyOnWriteBuffer& payload, cricket::SendDataResult* result) { +bool SctpDataChannelProviderInterfaceImpl::SendData( + int sid, + const webrtc::SendDataParams& params, + const rtc::CopyOnWriteBuffer& payload, + cricket::SendDataResult* result +) { assert(_threads->getNetworkThread()->IsCurrent()); - return _sctpTransport->SendData(sid, params, payload); + return _sctpTransport->SendData(sid, params, payload, result); } bool SctpDataChannelProviderInterfaceImpl::ConnectDataChannel(webrtc::SctpDataChannel *data_channel) { diff --git a/TMessagesProj/jni/voip/tgcalls/SctpDataChannelProviderInterfaceImpl.h b/TMessagesProj/jni/voip/tgcalls/SctpDataChannelProviderInterfaceImpl.h index 2ecaffc14..59904afc6 100644 --- a/TMessagesProj/jni/voip/tgcalls/SctpDataChannelProviderInterfaceImpl.h +++ b/TMessagesProj/jni/voip/tgcalls/SctpDataChannelProviderInterfaceImpl.h @@ -5,6 +5,7 @@ #include "api/turn_customizer.h" #include "api/data_channel_interface.h" #include "pc/sctp_data_channel.h" +#include "media/sctp/sctp_transport_factory.h" #include 
"pc/sctp_transport.h" #include "StaticThreads.h" @@ -32,7 +33,11 @@ public: virtual void OnStateChange() override; virtual void OnMessage(const webrtc::DataBuffer& buffer) override; - virtual bool SendData(int sid, const webrtc::SendDataParams& params, const rtc::CopyOnWriteBuffer& payload, cricket::SendDataResult* result = nullptr) override; + virtual bool SendData( + int sid, + const webrtc::SendDataParams& params, + const rtc::CopyOnWriteBuffer& payload, + cricket::SendDataResult* result) override; virtual bool ConnectDataChannel(webrtc::SctpDataChannel *data_channel) override; virtual void DisconnectDataChannel(webrtc::SctpDataChannel* data_channel) override; virtual void AddSctpDataStream(int sid) override; @@ -41,7 +46,7 @@ public: private: void sctpReadyToSendData(); - void sctpClosedAbruptly(); + void sctpClosedAbruptly(webrtc::RTCError error); void sctpDataReceived(const cricket::ReceiveDataParams& params, const rtc::CopyOnWriteBuffer& buffer); private: diff --git a/TMessagesProj/jni/voip/tgcalls/StaticThreads.cpp b/TMessagesProj/jni/voip/tgcalls/StaticThreads.cpp index 1d14b6fad..0cc9ac56e 100644 --- a/TMessagesProj/jni/voip/tgcalls/StaticThreads.cpp +++ b/TMessagesProj/jni/voip/tgcalls/StaticThreads.cpp @@ -60,16 +60,17 @@ class ThreadsImpl : public Threads { public: explicit ThreadsImpl(size_t i) { auto suffix = i == 0 ? "" : "#" + std::to_string(i); - network_ = create_network("tgc-net" + suffix); - network_->DisallowAllInvokes(); media_ = create("tgc-media" + suffix); - worker_ = create("tgc-work" + suffix); - worker_->DisallowAllInvokes(); - worker_->AllowInvokesToThread(network_.get()); + //worker_ = create("tgc-work" + suffix); + worker_ = create_network("tgc-work" + suffix); + //network_ = create_network("tgc-net" + suffix); + //network_->DisallowAllInvokes(); + //worker_->DisallowAllInvokes(); + //worker_->AllowInvokesToThread(network_.get()); } rtc::Thread *getNetworkThread() override { - return network_.get(); + return worker_.get(); } rtc::Thread *getMediaThread() override { return media_.get(); @@ -89,7 +90,7 @@ public: } private: - Thread network_; + //Thread network_; Thread media_; Thread worker_; rtc::scoped_refptr shared_module_thread_; diff --git a/TMessagesProj/jni/voip/tgcalls/ThreadLocalObject.h b/TMessagesProj/jni/voip/tgcalls/ThreadLocalObject.h index 3c6c51cf5..cd96ac34d 100644 --- a/TMessagesProj/jni/voip/tgcalls/ThreadLocalObject.h +++ b/TMessagesProj/jni/voip/tgcalls/ThreadLocalObject.h @@ -19,20 +19,20 @@ public: _thread(thread), _valueHolder(std::make_unique()) { assert(_thread != nullptr); - _thread->PostTask(RTC_FROM_HERE, [valueHolder = _valueHolder.get(), generator = std::forward(generator)]() mutable { + _thread->PostTask([valueHolder = _valueHolder.get(), generator = std::forward(generator)]() mutable { valueHolder->_value.reset(generator()); }); } ~ThreadLocalObject() { - _thread->PostTask(RTC_FROM_HERE, [valueHolder = std::move(_valueHolder)](){ + _thread->PostTask([valueHolder = std::move(_valueHolder)](){ valueHolder->_value.reset(); }); } template void perform(const rtc::Location& posted_from, FunctorT &&functor) { - _thread->PostTask(posted_from, [valueHolder = _valueHolder.get(), f = std::forward(functor)]() mutable { + _thread->PostTask([valueHolder = _valueHolder.get(), f = std::forward(functor)]() mutable { assert(valueHolder->_value != nullptr); f(valueHolder->_value.get()); }); diff --git a/TMessagesProj/jni/voip/tgcalls/group/AVIOContextImpl.cpp b/TMessagesProj/jni/voip/tgcalls/group/AVIOContextImpl.cpp new file mode 100644 index 
000000000..337efa9b2 --- /dev/null +++ b/TMessagesProj/jni/voip/tgcalls/group/AVIOContextImpl.cpp @@ -0,0 +1,64 @@ +#include "AVIOContextImpl.h" + +#include "rtc_base/logging.h" +#include "rtc_base/third_party/base64/base64.h" +#include "api/video/i420_buffer.h" + +#include +#include +#include + +namespace tgcalls { + +namespace { + +int AVIOContextImplRead(void *opaque, unsigned char *buffer, int bufferSize) { + AVIOContextImpl *instance = static_cast(opaque); + + int bytesToRead = std::min(bufferSize, ((int)instance->_fileData.size()) - instance->_fileReadPosition); + if (bytesToRead < 0) { + bytesToRead = 0; + } + + if (bytesToRead > 0) { + memcpy(buffer, instance->_fileData.data() + instance->_fileReadPosition, bytesToRead); + instance->_fileReadPosition += bytesToRead; + + return bytesToRead; + } else { + return AVERROR_EOF; + } +} + +int64_t AVIOContextImplSeek(void *opaque, int64_t offset, int whence) { + AVIOContextImpl *instance = static_cast(opaque); + + if (whence == 0x10000) { + return (int64_t)instance->_fileData.size(); + } else { + int64_t seekOffset = std::min(offset, (int64_t)instance->_fileData.size()); + if (seekOffset < 0) { + seekOffset = 0; + } + instance->_fileReadPosition = (int)seekOffset; + return seekOffset; + } +} + +} + +AVIOContextImpl::AVIOContextImpl(std::vector &&fileData) : +_fileData(std::move(fileData)) { + _buffer.resize(4 * 1024); + _context = avio_alloc_context(_buffer.data(), (int)_buffer.size(), 0, this, &AVIOContextImplRead, NULL, &AVIOContextImplSeek); +} + +AVIOContextImpl::~AVIOContextImpl() { + av_free(_context); +} + +AVIOContext *AVIOContextImpl::getContext() const { + return _context; +}; + +} diff --git a/TMessagesProj/jni/voip/tgcalls/group/AVIOContextImpl.h b/TMessagesProj/jni/voip/tgcalls/group/AVIOContextImpl.h new file mode 100644 index 000000000..a4a4987b6 --- /dev/null +++ b/TMessagesProj/jni/voip/tgcalls/group/AVIOContextImpl.h @@ -0,0 +1,40 @@ +#ifndef TGCALLS_AVIOCONTEXTIMPL_H +#define TGCALLS_AVIOCONTEXTIMPL_H + +#include "absl/types/optional.h" +#include +#include + +#include "api/video/video_frame.h" +#include "absl/types/optional.h" + +// Fix build on Windows - this should appear before FFmpeg timestamp include. 
+#define _USE_MATH_DEFINES +#include + +extern "C" { +#include +#include +#include +} + +namespace tgcalls { + +class AVIOContextImpl { +public: + AVIOContextImpl(std::vector &&fileData); + ~AVIOContextImpl(); + + AVIOContext *getContext() const; + +public: + std::vector _fileData; + int _fileReadPosition = 0; + + std::vector _buffer; + AVIOContext *_context = nullptr; +}; + +} + +#endif diff --git a/TMessagesProj/jni/voip/tgcalls/group/AudioStreamingPart.cpp b/TMessagesProj/jni/voip/tgcalls/group/AudioStreamingPart.cpp index f10c6104f..a4e68d1e3 100644 --- a/TMessagesProj/jni/voip/tgcalls/group/AudioStreamingPart.cpp +++ b/TMessagesProj/jni/voip/tgcalls/group/AudioStreamingPart.cpp @@ -1,14 +1,10 @@ #include "AudioStreamingPart.h" +#include "AudioStreamingPartInternal.h" + #include "rtc_base/logging.h" #include "rtc_base/third_party/base64/base64.h" -extern "C" { -#include -#include -#include -} - #include #include #include @@ -162,302 +158,6 @@ struct ReadPcmResult { int numChannels = 0; }; -class AudioStreamingPartInternal { -public: - AudioStreamingPartInternal(std::vector &&fileData) : - _avIoContext(std::move(fileData)) { - int ret = 0; - - _frame = av_frame_alloc(); - - AVInputFormat *inputFormat = av_find_input_format("ogg"); - if (!inputFormat) { - _didReadToEnd = true; - return; - } - - _inputFormatContext = avformat_alloc_context(); - if (!_inputFormatContext) { - _didReadToEnd = true; - return; - } - - _inputFormatContext->pb = _avIoContext.getContext(); - - if ((ret = avformat_open_input(&_inputFormatContext, "", inputFormat, nullptr)) < 0) { - _didReadToEnd = true; - return; - } - - if ((ret = avformat_find_stream_info(_inputFormatContext, nullptr)) < 0) { - _didReadToEnd = true; - - avformat_close_input(&_inputFormatContext); - _inputFormatContext = nullptr; - return; - } - - AVCodecParameters *audioCodecParameters = nullptr; - AVStream *audioStream = nullptr; - for (int i = 0; i < _inputFormatContext->nb_streams; i++) { - AVStream *inStream = _inputFormatContext->streams[i]; - - AVCodecParameters *inCodecpar = inStream->codecpar; - if (inCodecpar->codec_type != AVMEDIA_TYPE_AUDIO) { - continue; - } - audioCodecParameters = inCodecpar; - audioStream = inStream; - - _durationInMilliseconds = (int)((inStream->duration + inStream->first_dts) * 1000 / 48000); - - if (inStream->metadata) { - AVDictionaryEntry *entry = av_dict_get(inStream->metadata, "TG_META", nullptr, 0); - if (entry && entry->value) { - std::string result; - size_t data_used = 0; - std::string sourceBase64 = (const char *)entry->value; - rtc::Base64::Decode(sourceBase64, rtc::Base64::DO_LAX, &result, &data_used); - - if (result.size() != 0) { - int offset = 0; - _channelUpdates = parseChannelUpdates(result, offset); - } - } - - uint32_t videoChannelMask = 0; - entry = av_dict_get(inStream->metadata, "ACTIVE_MASK", nullptr, 0); - if (entry && entry->value) { - std::string sourceString = (const char *)entry->value; - videoChannelMask = stringToUInt32(sourceString); - } - - std::vector endpointList; - entry = av_dict_get(inStream->metadata, "ENDPOINTS", nullptr, 0); - if (entry && entry->value) { - std::string sourceString = (const char *)entry->value; - endpointList = splitString(sourceString, ' '); - } - - std::bitset<32> videoChannels(videoChannelMask); - size_t endpointIndex = 0; - if (videoChannels.count() == endpointList.size()) { - for (size_t i = 0; i < videoChannels.size(); i++) { - if (videoChannels[i]) { - _endpointMapping.insert(std::make_pair(endpointList[endpointIndex], i)); - endpointIndex++; - } - 
} - } - } - - break; - } - - if (audioCodecParameters && audioStream) { - AVCodec *codec = avcodec_find_decoder(audioCodecParameters->codec_id); - if (codec) { - _codecContext = avcodec_alloc_context3(codec); - ret = avcodec_parameters_to_context(_codecContext, audioCodecParameters); - if (ret < 0) { - _didReadToEnd = true; - - avcodec_free_context(&_codecContext); - _codecContext = nullptr; - } else { - _codecContext->pkt_timebase = audioStream->time_base; - - _channelCount = _codecContext->channels; - - ret = avcodec_open2(_codecContext, codec, nullptr); - if (ret < 0) { - _didReadToEnd = true; - - avcodec_free_context(&_codecContext); - _codecContext = nullptr; - } - } - } - } - } - - ~AudioStreamingPartInternal() { - if (_frame) { - av_frame_unref(_frame); - } - if (_codecContext) { - avcodec_close(_codecContext); - avcodec_free_context(&_codecContext); - } - if (_inputFormatContext) { - avformat_close_input(&_inputFormatContext); - } - } - - ReadPcmResult readPcm(std::vector &outPcm) { - int outPcmSampleOffset = 0; - ReadPcmResult result; - - int readSamples = (int)outPcm.size() / _channelCount; - - result.numChannels = _channelCount; - - while (outPcmSampleOffset < readSamples) { - if (_pcmBufferSampleOffset >= _pcmBufferSampleSize) { - fillPcmBuffer(); - - if (_pcmBufferSampleOffset >= _pcmBufferSampleSize) { - break; - } - } - - int readFromPcmBufferSamples = std::min(_pcmBufferSampleSize - _pcmBufferSampleOffset, readSamples - outPcmSampleOffset); - if (readFromPcmBufferSamples != 0) { - std::copy(_pcmBuffer.begin() + _pcmBufferSampleOffset * _channelCount, _pcmBuffer.begin() + _pcmBufferSampleOffset * _channelCount + readFromPcmBufferSamples * _channelCount, outPcm.begin() + outPcmSampleOffset * _channelCount); - _pcmBufferSampleOffset += readFromPcmBufferSamples; - outPcmSampleOffset += readFromPcmBufferSamples; - result.numSamples += readFromPcmBufferSamples; - } - } - - return result; - } - - int getDurationInMilliseconds() { - return _durationInMilliseconds; - } - - int getChannelCount() { - return _channelCount; - } - - std::vector const &getChannelUpdates() const { - return _channelUpdates; - } - - std::map getEndpointMapping() const { - return _endpointMapping; - } - -private: - static int16_t sampleFloatToInt16(float sample) { - return av_clip_int16 (static_cast(lrint(sample*32767))); - } - - void fillPcmBuffer() { - _pcmBufferSampleSize = 0; - _pcmBufferSampleOffset = 0; - - if (_didReadToEnd) { - return; - } - if (!_inputFormatContext) { - _didReadToEnd = true; - return; - } - if (!_codecContext) { - _didReadToEnd = true; - return; - } - - int ret = 0; - do { - ret = av_read_frame(_inputFormatContext, &_packet); - if (ret < 0) { - _didReadToEnd = true; - return; - } - - ret = avcodec_send_packet(_codecContext, &_packet); - if (ret < 0) { - _didReadToEnd = true; - return; - } - - int bytesPerSample = av_get_bytes_per_sample(_codecContext->sample_fmt); - if (bytesPerSample != 2 && bytesPerSample != 4) { - _didReadToEnd = true; - return; - } - - ret = avcodec_receive_frame(_codecContext, _frame); - } while (ret == AVERROR(EAGAIN)); - - if (ret != 0) { - _didReadToEnd = true; - return; - } - if (_frame->channels != _channelCount || _frame->channels > 8) { - _didReadToEnd = true; - return; - } - - if (_pcmBuffer.size() < _frame->nb_samples * _frame->channels) { - _pcmBuffer.resize(_frame->nb_samples * _frame->channels); - } - - switch (_codecContext->sample_fmt) { - case AV_SAMPLE_FMT_S16: { - memcpy(_pcmBuffer.data(), _frame->data[0], _frame->nb_samples * 2 * 
_frame->channels); - } break; - - case AV_SAMPLE_FMT_S16P: { - int16_t *to = _pcmBuffer.data(); - for (int sample = 0; sample < _frame->nb_samples; ++sample) { - for (int channel = 0; channel < _frame->channels; ++channel) { - int16_t *shortChannel = (int16_t*)_frame->data[channel]; - *to++ = shortChannel[sample]; - } - } - } break; - - case AV_SAMPLE_FMT_FLT: { - float *floatData = (float *)&_frame->data[0]; - for (int i = 0; i < _frame->nb_samples * _frame->channels; i++) { - _pcmBuffer[i] = sampleFloatToInt16(floatData[i]); - } - } break; - - case AV_SAMPLE_FMT_FLTP: { - int16_t *to = _pcmBuffer.data(); - for (int sample = 0; sample < _frame->nb_samples; ++sample) { - for (int channel = 0; channel < _frame->channels; ++channel) { - float *floatChannel = (float*)_frame->data[channel]; - *to++ = sampleFloatToInt16(floatChannel[sample]); - } - } - } break; - - default: { - //RTC_FATAL() << "Unexpected sample_fmt"; - } break; - } - - _pcmBufferSampleSize = _frame->nb_samples; - _pcmBufferSampleOffset = 0; - } - -private: - AVIOContextImpl _avIoContext; - - AVFormatContext *_inputFormatContext = nullptr; - AVPacket _packet; - AVCodecContext *_codecContext = nullptr; - AVFrame *_frame = nullptr; - - bool _didReadToEnd = false; - - int _durationInMilliseconds = 0; - int _channelCount = 0; - - std::vector _channelUpdates; - std::map _endpointMapping; - - std::vector _pcmBuffer; - int _pcmBufferSampleOffset = 0; - int _pcmBufferSampleSize = 0; -}; - class AudioStreamingPartState { struct ChannelMapping { uint32_t ssrc = 0; @@ -469,15 +169,15 @@ class AudioStreamingPartState { }; public: - AudioStreamingPartState(std::vector &&data) : - _parsedPart(std::move(data)) { - if (_parsedPart.getChannelUpdates().size() == 0) { + AudioStreamingPartState(std::vector &&data, std::string const &container, bool isSingleChannel) : + _isSingleChannel(isSingleChannel), + _parsedPart(std::move(data), container) { + if (_parsedPart.getChannelUpdates().size() == 0 && !isSingleChannel) { _didReadToEnd = true; return; } _remainingMilliseconds = _parsedPart.getDurationInMilliseconds(); - _pcm10ms.resize(480 * _parsedPart.getChannelCount()); for (const auto &it : _parsedPart.getChannelUpdates()) { _allSsrcs.insert(it.ssrc); @@ -495,7 +195,7 @@ public: return _remainingMilliseconds; } - std::vector get10msPerChannel() { + std::vector get10msPerChannel(AudioStreamingPartPersistentDecoder &persistentDecoder) { if (_didReadToEnd) { return {}; } @@ -506,30 +206,50 @@ public: } } - auto readResult = _parsedPart.readPcm(_pcm10ms); + auto readResult = _parsedPart.readPcm(persistentDecoder, _pcm10ms); if (readResult.numSamples <= 0) { _didReadToEnd = true; return {}; } std::vector resultChannels; - for (const auto ssrc : _allSsrcs) { - AudioStreamingPart::StreamingPartChannel emptyPart; - emptyPart.ssrc = ssrc; - resultChannels.push_back(emptyPart); - } - for (auto &channel : resultChannels) { - auto mappedChannelIndex = getCurrentMappedChannelIndex(channel.ssrc); + if (_isSingleChannel) { + for (int i = 0; i < readResult.numChannels; i++) { + AudioStreamingPart::StreamingPartChannel emptyPart; + emptyPart.ssrc = i + 1; + resultChannels.push_back(emptyPart); + } - if (mappedChannelIndex) { - int sourceChannelIndex = mappedChannelIndex.value(); + for (int i = 0; i < readResult.numChannels; i++) { + auto channel = resultChannels.begin() + i; + int sourceChannelIndex = i; for (int j = 0; j < readResult.numSamples; j++) { - channel.pcmData.push_back(_pcm10ms[sourceChannelIndex + j * readResult.numChannels]); + 
channel->pcmData.push_back(_pcm10ms[sourceChannelIndex + j * readResult.numChannels]); } - } else { - for (int j = 0; j < readResult.numSamples; j++) { - channel.pcmData.push_back(0); + channel->numSamples += readResult.numSamples; + } + } else { + for (const auto ssrc : _allSsrcs) { + AudioStreamingPart::StreamingPartChannel emptyPart; + emptyPart.ssrc = ssrc; + resultChannels.push_back(emptyPart); + } + + for (auto &channel : resultChannels) { + auto mappedChannelIndex = getCurrentMappedChannelIndex(channel.ssrc); + + if (mappedChannelIndex) { + int sourceChannelIndex = mappedChannelIndex.value(); + for (int j = 0; j < readResult.numSamples; j++) { + channel.pcmData.push_back(_pcm10ms[sourceChannelIndex + j * readResult.numChannels]); + } + channel.numSamples += readResult.numSamples; + } else { + for (int j = 0; j < readResult.numSamples; j++) { + channel.pcmData.push_back(0); + } + channel.numSamples += readResult.numSamples; } } } @@ -566,6 +286,7 @@ private: } private: + bool _isSingleChannel = false; AudioStreamingPartInternal _parsedPart; std::set _allSsrcs; @@ -577,9 +298,9 @@ private: bool _didReadToEnd = false; }; -AudioStreamingPart::AudioStreamingPart(std::vector &&data) { +AudioStreamingPart::AudioStreamingPart(std::vector &&data, std::string const &container, bool isSingleChannel) { if (!data.empty()) { - _state = new AudioStreamingPartState(std::move(data)); + _state = new AudioStreamingPartState(std::move(data), container, isSingleChannel); } } @@ -597,9 +318,9 @@ int AudioStreamingPart::getRemainingMilliseconds() const { return _state ? _state->getRemainingMilliseconds() : 0; } -std::vector AudioStreamingPart::get10msPerChannel() { +std::vector AudioStreamingPart::get10msPerChannel(AudioStreamingPartPersistentDecoder &persistentDecoder) { return _state - ? _state->get10msPerChannel() + ? 
_state->get10msPerChannel(persistentDecoder) : std::vector(); } diff --git a/TMessagesProj/jni/voip/tgcalls/group/AudioStreamingPart.h b/TMessagesProj/jni/voip/tgcalls/group/AudioStreamingPart.h index 34b463931..e1e00510c 100644 --- a/TMessagesProj/jni/voip/tgcalls/group/AudioStreamingPart.h +++ b/TMessagesProj/jni/voip/tgcalls/group/AudioStreamingPart.h @@ -3,9 +3,12 @@ #include "absl/types/optional.h" #include +#include #include #include +#include "AudioStreamingPartPersistentDecoder.h" + namespace tgcalls { class AudioStreamingPartState; @@ -15,11 +18,12 @@ public: struct StreamingPartChannel { uint32_t ssrc = 0; std::vector pcmData; + int numSamples = 0; }; - - explicit AudioStreamingPart(std::vector &&data); + + explicit AudioStreamingPart(std::vector &&data, std::string const &container, bool isSingleChannel); ~AudioStreamingPart(); - + AudioStreamingPart(const AudioStreamingPart&) = delete; AudioStreamingPart(AudioStreamingPart&& other) { _state = other._state; @@ -30,8 +34,8 @@ public: std::map getEndpointMapping() const; int getRemainingMilliseconds() const; - std::vector get10msPerChannel(); - + std::vector get10msPerChannel(AudioStreamingPartPersistentDecoder &persistentDecoder); + private: AudioStreamingPartState *_state = nullptr; }; diff --git a/TMessagesProj/jni/voip/tgcalls/group/AudioStreamingPartInternal.cpp b/TMessagesProj/jni/voip/tgcalls/group/AudioStreamingPartInternal.cpp new file mode 100644 index 000000000..27a71cbad --- /dev/null +++ b/TMessagesProj/jni/voip/tgcalls/group/AudioStreamingPartInternal.cpp @@ -0,0 +1,367 @@ +#include "AudioStreamingPartInternal.h" + +#include "rtc_base/logging.h" +#include "rtc_base/third_party/base64/base64.h" + +extern "C" { +#include +#include +#include +} + +#include +#include +#include +#include + +namespace tgcalls { + +namespace { + +int16_t sampleFloatToInt16(float sample) { + return av_clip_int16 (static_cast(lrint(sample*32767))); +} + +uint32_t stringToUInt32(std::string const &string) { + std::stringstream stringStream(string); + uint32_t value = 0; + stringStream >> value; + return value; +} + +template +void splitString(const std::string &s, char delim, Out result) { + std::istringstream iss(s); + std::string item; + while (std::getline(iss, item, delim)) { + *result++ = item; + } +} + +std::vector splitString(const std::string &s, char delim) { + std::vector elems; + splitString(s, delim, std::back_inserter(elems)); + return elems; +} + +static absl::optional readInt32(std::string const &data, int &offset) { + if (offset + 4 > data.length()) { + return absl::nullopt; + } + + int32_t value = 0; + memcpy(&value, data.data() + offset, 4); + offset += 4; + + return value; +} + +std::vector parseChannelUpdates(std::string const &data, int &offset) { + std::vector result; + + auto channels = readInt32(data, offset); + if (!channels) { + return {}; + } + + auto count = readInt32(data, offset); + if (!count) { + return {}; + } + + for (int i = 0; i < count.value(); i++) { + auto frameIndex = readInt32(data, offset); + if (!frameIndex) { + return {}; + } + + auto channelId = readInt32(data, offset); + if (!channelId) { + return {}; + } + + auto ssrc = readInt32(data, offset); + if (!ssrc) { + return {}; + } + + AudioStreamingPartInternal::ChannelUpdate update; + update.frameIndex = frameIndex.value(); + update.id = channelId.value(); + update.ssrc = ssrc.value(); + + result.push_back(update); + } + + return result; +} + +} + +AudioStreamingPartInternal::AudioStreamingPartInternal(std::vector &&fileData, std::string const 
&container) : +_avIoContext(std::move(fileData)) { + int ret = 0; + + _frame = av_frame_alloc(); + + AVInputFormat *inputFormat = av_find_input_format(container.c_str()); + if (!inputFormat) { + _didReadToEnd = true; + return; + } + + _inputFormatContext = avformat_alloc_context(); + if (!_inputFormatContext) { + _didReadToEnd = true; + return; + } + + _inputFormatContext->pb = _avIoContext.getContext(); + + if ((ret = avformat_open_input(&_inputFormatContext, "", inputFormat, nullptr)) < 0) { + _didReadToEnd = true; + return; + } + + if ((ret = avformat_find_stream_info(_inputFormatContext, nullptr)) < 0) { + _didReadToEnd = true; + + avformat_close_input(&_inputFormatContext); + _inputFormatContext = nullptr; + return; + } + + for (int i = 0; i < _inputFormatContext->nb_streams; i++) { + AVStream *inStream = _inputFormatContext->streams[i]; + + AVCodecParameters *inCodecpar = inStream->codecpar; + if (inCodecpar->codec_type != AVMEDIA_TYPE_AUDIO) { + continue; + } + + _audioCodecParameters = avcodec_parameters_alloc(); + avcodec_parameters_copy(_audioCodecParameters, inCodecpar); + + _streamId = i; + + _durationInMilliseconds = (int)((inStream->duration + inStream->first_dts) * 1000 / 48000); + + if (inStream->metadata) { + AVDictionaryEntry *entry = av_dict_get(inStream->metadata, "TG_META", nullptr, 0); + if (entry && entry->value) { + std::string result; + size_t data_used = 0; + std::string sourceBase64 = (const char *)entry->value; + rtc::Base64::Decode(sourceBase64, rtc::Base64::DO_LAX, &result, &data_used); + + if (result.size() != 0) { + int offset = 0; + _channelUpdates = parseChannelUpdates(result, offset); + } + } + + uint32_t videoChannelMask = 0; + entry = av_dict_get(inStream->metadata, "ACTIVE_MASK", nullptr, 0); + if (entry && entry->value) { + std::string sourceString = (const char *)entry->value; + videoChannelMask = stringToUInt32(sourceString); + } + + std::vector endpointList; + entry = av_dict_get(inStream->metadata, "ENDPOINTS", nullptr, 0); + if (entry && entry->value) { + std::string sourceString = (const char *)entry->value; + endpointList = splitString(sourceString, ' '); + } + + std::bitset<32> videoChannels(videoChannelMask); + size_t endpointIndex = 0; + if (videoChannels.count() == endpointList.size()) { + for (size_t i = 0; i < videoChannels.size(); i++) { + if (videoChannels[i]) { + _endpointMapping.insert(std::make_pair(endpointList[endpointIndex], i)); + endpointIndex++; + } + } + } + } + + break; + } + + if (_streamId == -1) { + _didReadToEnd = true; + } +} + +AudioStreamingPartInternal::~AudioStreamingPartInternal() { + if (_frame) { + av_frame_unref(_frame); + } + if (_inputFormatContext) { + avformat_close_input(&_inputFormatContext); + } + if (_audioCodecParameters) { + avcodec_parameters_free(&_audioCodecParameters); + } +} + +AudioStreamingPartInternal::ReadPcmResult AudioStreamingPartInternal::readPcm(AudioStreamingPartPersistentDecoder &persistentDecoder, std::vector &outPcm) { + if (_didReadToEnd) { + return AudioStreamingPartInternal::ReadPcmResult(); + } + + int outPcmSampleOffset = 0; + ReadPcmResult result; + + if (_pcmBufferSampleOffset >= _pcmBufferSampleSize) { + fillPcmBuffer(persistentDecoder); + } + + if (outPcm.size() != 480 * _channelCount) { + outPcm.resize(480 * _channelCount); + } + int readSamples = 0; + if (_channelCount != 0) { + readSamples = (int)outPcm.size() / _channelCount; + } + + while (outPcmSampleOffset < readSamples) { + if (_pcmBufferSampleOffset >= _pcmBufferSampleSize) { + fillPcmBuffer(persistentDecoder); + + if 
(_pcmBufferSampleOffset >= _pcmBufferSampleSize) { + break; + } + } + + int readFromPcmBufferSamples = std::min(_pcmBufferSampleSize - _pcmBufferSampleOffset, readSamples - outPcmSampleOffset); + if (readFromPcmBufferSamples != 0) { + std::copy(_pcmBuffer.begin() + _pcmBufferSampleOffset * _channelCount, _pcmBuffer.begin() + _pcmBufferSampleOffset * _channelCount + readFromPcmBufferSamples * _channelCount, outPcm.begin() + outPcmSampleOffset * _channelCount); + _pcmBufferSampleOffset += readFromPcmBufferSamples; + outPcmSampleOffset += readFromPcmBufferSamples; + result.numSamples += readFromPcmBufferSamples; + _readSampleCount += readFromPcmBufferSamples; + } + } + + result.numChannels = _channelCount; + + // Uncomment for debugging incomplete frames + /*if (result.numSamples != 480 && result.numSamples != 0) { + RTC_LOG(LS_INFO) << "result.numSamples = " << result.numSamples << ", _readSampleCount = " << _readSampleCount << ", duration = " << _inputFormatContext->streams[_streamId]->duration; + }*/ + + return result; +} + +int AudioStreamingPartInternal::getDurationInMilliseconds() const { + return _durationInMilliseconds; +} + +std::vector const &AudioStreamingPartInternal::getChannelUpdates() const { + return _channelUpdates; +} + +std::map AudioStreamingPartInternal::getEndpointMapping() const { + return _endpointMapping; +} + +void AudioStreamingPartInternal::fillPcmBuffer(AudioStreamingPartPersistentDecoder &persistentDecoder) { + _pcmBufferSampleSize = 0; + _pcmBufferSampleOffset = 0; + + if (_didReadToEnd) { + return; + } + if (!_inputFormatContext) { + _didReadToEnd = true; + return; + } + + int ret = 0; + while (true) { + ret = av_read_frame(_inputFormatContext, &_packet); + if (ret < 0) { + _didReadToEnd = true; + return; + } + + if (_packet.stream_index != _streamId) { + continue; + } + + ret = persistentDecoder.decode(_audioCodecParameters, _inputFormatContext->streams[_streamId]->time_base, _packet, _frame); + + if (ret == AVERROR(EAGAIN)) { + continue; + } + + break; + } + + if (ret != 0) { + _didReadToEnd = true; + return; + } + + if (_channelCount == 0) { + _channelCount = _frame->channels; + } + + if (_channelCount == 0) { + _didReadToEnd = true; + return; + } + + if (_frame->channels != _channelCount || _frame->channels > 8) { + _didReadToEnd = true; + return; + } + + if (_pcmBuffer.size() < _frame->nb_samples * _frame->channels) { + _pcmBuffer.resize(_frame->nb_samples * _frame->channels); + } + + switch (_frame->format) { + case AV_SAMPLE_FMT_S16: { + memcpy(_pcmBuffer.data(), _frame->data[0], _frame->nb_samples * 2 * _frame->channels); + } break; + + case AV_SAMPLE_FMT_S16P: { + int16_t *to = _pcmBuffer.data(); + for (int sample = 0; sample < _frame->nb_samples; ++sample) { + for (int channel = 0; channel < _frame->channels; ++channel) { + int16_t *shortChannel = (int16_t*)_frame->data[channel]; + *to++ = shortChannel[sample]; + } + } + } break; + + case AV_SAMPLE_FMT_FLT: { + float *floatData = (float *)&_frame->data[0]; + for (int i = 0; i < _frame->nb_samples * _frame->channels; i++) { + _pcmBuffer[i] = sampleFloatToInt16(floatData[i]); + } + } break; + + case AV_SAMPLE_FMT_FLTP: { + int16_t *to = _pcmBuffer.data(); + for (int sample = 0; sample < _frame->nb_samples; ++sample) { + for (int channel = 0; channel < _frame->channels; ++channel) { + float *floatChannel = (float*)_frame->data[channel]; + *to++ = sampleFloatToInt16(floatChannel[sample]); + } + } + } break; + + default: { + RTC_FATAL() << "Unexpected sample_fmt"; + } break; + } + + _pcmBufferSampleSize = 
_frame->nb_samples; + _pcmBufferSampleOffset = 0; +} + +} diff --git a/TMessagesProj/jni/voip/tgcalls/group/AudioStreamingPartInternal.h b/TMessagesProj/jni/voip/tgcalls/group/AudioStreamingPartInternal.h new file mode 100644 index 000000000..a1637756e --- /dev/null +++ b/TMessagesProj/jni/voip/tgcalls/group/AudioStreamingPartInternal.h @@ -0,0 +1,65 @@ +#ifndef TGCALLS_AUDIO_STREAMING_PART_INTERNAL_H +#define TGCALLS_AUDIO_STREAMING_PART_INTERNAL_H + +#include "absl/types/optional.h" +#include +#include +#include +#include + +#include "AVIOContextImpl.h" +#include "AudioStreamingPartPersistentDecoder.h" + +namespace tgcalls { + +class AudioStreamingPartInternal { +public: + struct ReadPcmResult { + int numSamples = 0; + int numChannels = 0; + }; + + struct ChannelUpdate { + int frameIndex = 0; + int id = 0; + uint32_t ssrc = 0; + }; + +public: + AudioStreamingPartInternal(std::vector &&fileData, std::string const &container); + ~AudioStreamingPartInternal(); + + ReadPcmResult readPcm(AudioStreamingPartPersistentDecoder &persistentDecoder, std::vector &outPcm); + int getDurationInMilliseconds() const; + std::vector const &getChannelUpdates() const; + std::map getEndpointMapping() const; + +private: + void fillPcmBuffer(AudioStreamingPartPersistentDecoder &persistentDecoder); + +private: + AVIOContextImpl _avIoContext; + + AVFormatContext *_inputFormatContext = nullptr; + AVPacket _packet; + AVFrame *_frame = nullptr; + AVCodecParameters *_audioCodecParameters = nullptr; + + bool _didReadToEnd = false; + + int _durationInMilliseconds = 0; + int _streamId = -1; + int _channelCount = 0; + + std::vector _channelUpdates; + std::map _endpointMapping; + + std::vector _pcmBuffer; + int _pcmBufferSampleOffset = 0; + int _pcmBufferSampleSize = 0; + int _readSampleCount = 0; +}; + +} + +#endif diff --git a/TMessagesProj/jni/voip/tgcalls/group/AudioStreamingPartPersistentDecoder.cpp b/TMessagesProj/jni/voip/tgcalls/group/AudioStreamingPartPersistentDecoder.cpp new file mode 100644 index 000000000..e79d4304d --- /dev/null +++ b/TMessagesProj/jni/voip/tgcalls/group/AudioStreamingPartPersistentDecoder.cpp @@ -0,0 +1,122 @@ +#include "AudioStreamingPartPersistentDecoder.h" + +#include "rtc_base/logging.h" +#include "rtc_base/third_party/base64/base64.h" + +namespace tgcalls { + +WrappedCodecParameters::WrappedCodecParameters(AVCodecParameters const *codecParameters) { + _value = avcodec_parameters_alloc(); + avcodec_parameters_copy(_value, codecParameters); +} + +WrappedCodecParameters::~WrappedCodecParameters() { + avcodec_parameters_free(&_value); +} + +bool WrappedCodecParameters::isEqual(AVCodecParameters const *other) { + if (_value->codec_id != other->codec_id) { + return false; + } + if (_value->format != other->format) { + return false; + } + if (_value->channels != other->channels) { + return false; + } + return true; +} + +class AudioStreamingPartPersistentDecoderState { +public: + AudioStreamingPartPersistentDecoderState(AVCodecParameters const *codecParameters, AVRational timeBase) : + _codecParameters(codecParameters), + _timeBase(timeBase) { + AVCodec *codec = avcodec_find_decoder(codecParameters->codec_id); + if (codec) { + _codecContext = avcodec_alloc_context3(codec); + int ret = avcodec_parameters_to_context(_codecContext, codecParameters); + if (ret < 0) { + avcodec_free_context(&_codecContext); + _codecContext = nullptr; + } else { + _codecContext->pkt_timebase = timeBase; + + _channelCount = _codecContext->channels; + + ret = avcodec_open2(_codecContext, codec, nullptr); + if (ret < 
0) { + avcodec_free_context(&_codecContext); + _codecContext = nullptr; + } + } + } + } + + ~AudioStreamingPartPersistentDecoderState() { + if (_codecContext) { + avcodec_free_context(&_codecContext); + } + } + + int decode(AVPacket &packet, AVFrame *frame) { + int ret = avcodec_send_packet(_codecContext, &packet); + if (ret < 0) { + return ret; + } + + int bytesPerSample = av_get_bytes_per_sample(_codecContext->sample_fmt); + if (bytesPerSample != 2 && bytesPerSample != 4) { + return -1; + } + + ret = avcodec_receive_frame(_codecContext, frame); + return ret; + } + +public: + WrappedCodecParameters _codecParameters; + AVRational _timeBase; + AVCodecContext *_codecContext = nullptr; + int _channelCount = 0; +}; + +AudioStreamingPartPersistentDecoder::AudioStreamingPartPersistentDecoder() { +} + +AudioStreamingPartPersistentDecoder::~AudioStreamingPartPersistentDecoder() { +} + +void AudioStreamingPartPersistentDecoder::maybeReset(AVCodecParameters const *codecParameters, AVRational timeBase) { + if (_state) { + bool isUpdated = false; + if (!_state->_codecParameters.isEqual(codecParameters)) { + isUpdated = true; + } + if (_state->_timeBase.num != timeBase.num || _state->_timeBase.den != timeBase.den) { + isUpdated = true; + } + if (!isUpdated) { + return; + } + } + + if (_state) { + delete _state; + _state = nullptr; + } + + _state = new AudioStreamingPartPersistentDecoderState(codecParameters, timeBase); +} + +int AudioStreamingPartPersistentDecoder::decode(AVCodecParameters const *codecParameters, AVRational timeBase, AVPacket &packet, AVFrame *frame) { + maybeReset(codecParameters, timeBase); + + if (!_state) { + return -1; + } + + return _state->decode(packet, frame); +} + +} diff --git a/TMessagesProj/jni/voip/tgcalls/group/AudioStreamingPartPersistentDecoder.h b/TMessagesProj/jni/voip/tgcalls/group/AudioStreamingPartPersistentDecoder.h new file mode 100644 index 000000000..e5bb98d90 --- /dev/null +++ b/TMessagesProj/jni/voip/tgcalls/group/AudioStreamingPartPersistentDecoder.h @@ -0,0 +1,51 @@ +#ifndef TGCALLS_AUDIO_STREAMING_PART_PERSISTENT_DECODER_H +#define TGCALLS_AUDIO_STREAMING_PART_PERSISTENT_DECODER_H + +#include "absl/types/optional.h" +#include +#include +#include +#include + +// Fix build on Windows - this should appear before FFmpeg timestamp include. 
+#define _USE_MATH_DEFINES +#include + +extern "C" { +#include +#include +#include +} + +namespace tgcalls { + +class AudioStreamingPartPersistentDecoderState; + +class WrappedCodecParameters { +public: + WrappedCodecParameters(AVCodecParameters const *codecParameters); + ~WrappedCodecParameters(); + + bool isEqual(AVCodecParameters const *other); + +private: + AVCodecParameters *_value = nullptr; +}; + +class AudioStreamingPartPersistentDecoder { +public: + AudioStreamingPartPersistentDecoder(); + ~AudioStreamingPartPersistentDecoder(); + + int decode(AVCodecParameters const *codecParameters, AVRational timeBase, AVPacket &packet, AVFrame *frame); + +private: + void maybeReset(AVCodecParameters const *codecParameters, AVRational timeBase); + +private: + AudioStreamingPartPersistentDecoderState *_state = nullptr; +}; + +} + +#endif diff --git a/TMessagesProj/jni/voip/tgcalls/group/GroupInstanceCustomImpl.cpp b/TMessagesProj/jni/voip/tgcalls/group/GroupInstanceCustomImpl.cpp index 3ba311d88..fc53be4e3 100644 --- a/TMessagesProj/jni/voip/tgcalls/group/GroupInstanceCustomImpl.cpp +++ b/TMessagesProj/jni/voip/tgcalls/group/GroupInstanceCustomImpl.cpp @@ -24,18 +24,20 @@ #include "system_wrappers/include/field_trial.h" #include "api/video/builtin_video_bitrate_allocator_factory.h" #include "call/call.h" -#include "modules/rtp_rtcp/source/rtp_utility.h" +#include "media/base/rtp_utils.h" #include "api/call/audio_sink.h" #include "modules/audio_processing/audio_buffer.h" #include "absl/strings/match.h" -#include "modules/audio_processing/agc2/vad_with_level.h" #include "modules/audio_processing/agc2/cpu_features.h" +#include "modules/audio_processing/agc2/vad_wrapper.h" #include "pc/channel_manager.h" #include "audio/audio_state.h" #include "modules/audio_coding/neteq/default_neteq_factory.h" #include "modules/audio_coding/include/audio_coding_module.h" #include "common_audio/include/audio_util.h" #include "modules/audio_device/include/audio_device_data_observer.h" +#include "common_audio/resampler/include/resampler.h" +#include "modules/rtp_rtcp/source/rtp_util.h" #include "AudioFrame.h" #include "ThreadLocalObject.h" @@ -50,7 +52,9 @@ #include "AudioDeviceHelper.h" #include "FakeAudioDeviceModule.h" #include "StreamingMediaContext.h" - +#ifdef WEBRTC_IOS +#include "platform/darwin/iOS/tgcalls_audio_device_module_ios.h" +#endif #include #include #include @@ -273,7 +277,7 @@ static std::vector filterSupportedVideoFormats(std::vect return lhsLevelAssymetryAllowedPriority < rhsLevelAssymetryAllowedPriority; } - return true; + return false; }); filteredFormats.push_back(h264Formats[0]); @@ -467,15 +471,14 @@ public: class CombinedVad { private: - std::unique_ptr _vadWithLevel; + webrtc::VoiceActivityDetectorWrapper _vadWithLevel; VadHistory _history; bool _countFrames; std::atomic _waitingFramesToProcess{0}; public: - CombinedVad(bool count = false){ - _vadWithLevel = std::make_unique(500, webrtc::GetAvailableCpuFeatures()); - _countFrames = count; + CombinedVad() : + _vadWithLevel(500, webrtc::GetAvailableCpuFeatures(), webrtc::AudioProcessing::kSampleRate48kHz) { } ~CombinedVad() = default; @@ -489,25 +492,25 @@ public: } bool update(webrtc::AudioBuffer *buffer) { - if (buffer) { - if (_countFrames) { - _waitingFramesToProcess--; - } - if (buffer->num_channels() <= 0) { - return _history.update(0.0f); - } - webrtc::AudioFrameView frameView(buffer->channels(), buffer->num_channels(), buffer->num_frames()); - float peak = 0.0f; - for (const auto &x : frameView.channel(0)) { - peak = 
std::max(std::fabs(x), peak); - } - if (peak <= 0.01f) { - return _history.update(false); - } - auto result = _vadWithLevel->AnalyzeFrame(frameView); - return _history.update(result.speech_probability); + if (buffer->num_channels() <= 0) { + return _history.update(0.0f); } - return _history.update(-1); + webrtc::AudioFrameView frameView(buffer->channels(), (int)(buffer->num_channels()), (int)(buffer->num_frames())); + float peak = 0.0f; + for (const auto &x : frameView.channel(0)) { + peak = std::max(std::fabs(x), peak); + } + if (peak <= 0.01f) { + return _history.update(false); + } + + auto result = _vadWithLevel.Analyze(frameView); + + return _history.update(result); + } + + bool update() { + return _history.update(0.0f); } }; @@ -553,7 +556,7 @@ public: AudioSinkImpl(std::function update, ChannelId channel_id, std::function onAudioFrame) : _update(update), _channel_id(channel_id), _onAudioFrame(std::move(onAudioFrame)) { - _vad = std::make_shared(true); + //_vad = std::make_shared(true); } virtual ~AudioSinkImpl() { @@ -620,7 +623,7 @@ public: virtual void OnFrame(const webrtc::VideoFrame& frame) override { std::unique_lock lock{ _mutex }; - /*int64_t timestamp = rtc::TimeMillis(); + int64_t timestamp = rtc::TimeMillis(); if (_lastFrame) { if (_lastFrame->video_frame_buffer()->width() != frame.video_frame_buffer()->width()) { int64_t deltaTime = std::abs(_lastFrameSizeChangeTimestamp - timestamp); @@ -635,7 +638,7 @@ public: _lastFrameSizeChangeHeight = 0; _lastFrameSizeChangeTimestamp = timestamp; } - _lastFrame = frame;*/ + _lastFrame = frame; for (int i = (int)(_sinks.size()) - 1; i >= 0; i--) { auto strong = _sinks[i].lock(); if (!strong) { @@ -659,11 +662,10 @@ public: } void addSink(std::weak_ptr> impl) { - std::unique_lock lock{ _mutex }; - _sinks.push_back(impl); - if (_lastFrame) { - auto strong = impl.lock(); - if (strong) { + if (const auto strong = impl.lock()) { + std::unique_lock lock{ _mutex }; + _sinks.push_back(impl); + if (_lastFrame) { strong->OnFrame(_lastFrame.value()); } } @@ -927,7 +929,8 @@ public: std::string streamId = std::string("stream") + ssrc.name(); - _audioChannel = _channelManager->CreateVoiceChannel(_call, cricket::MediaConfig(), rtpTransport, _threads->getWorkerThread(), std::string("audio") + uint32ToString(ssrc.networkSsrc), false, GroupNetworkManager::getDefaulCryptoOptions(), randomIdGenerator, audioOptions); + _audioChannel = _channelManager->CreateVoiceChannel(_call, cricket::MediaConfig(), std::string("audio") + uint32ToString(ssrc.networkSsrc), false, GroupNetworkManager::getDefaulCryptoOptions(), audioOptions); + _audioChannel->SetRtpTransport(rtpTransport); const uint8_t opusPTimeMs = 120; @@ -964,8 +967,9 @@ public: streamParams.set_stream_ids({ streamId }); incomingAudioDescription->AddStream(streamParams); - _audioChannel->SetLocalContent(outgoingAudioDescription.get(), webrtc::SdpType::kOffer, nullptr); - _audioChannel->SetRemoteContent(incomingAudioDescription.get(), webrtc::SdpType::kAnswer, nullptr); + std::string error_desc; + _audioChannel->SetLocalContent(outgoingAudioDescription.get(), webrtc::SdpType::kOffer, error_desc); + _audioChannel->SetRemoteContent(incomingAudioDescription.get(), webrtc::SdpType::kAnswer, error_desc); _audioChannel->SetPayloadTypeDemuxingEnabled(false); outgoingAudioDescription.reset(); @@ -1100,10 +1104,11 @@ public: incomingVideoDescription->AddStream(videoRecvStreamParams); - _videoChannel = _channelManager->CreateVideoChannel(_call, cricket::MediaConfig(), rtpTransport, _threads->getWorkerThread(), 
std::string("video") + uint32ToString(mid), false, GroupNetworkManager::getDefaulCryptoOptions(), randomIdGenerator, cricket::VideoOptions(), _videoBitrateAllocatorFactory.get()); - - _videoChannel->SetLocalContent(outgoingVideoDescription.get(), webrtc::SdpType::kOffer, nullptr); - _videoChannel->SetRemoteContent(incomingVideoDescription.get(), webrtc::SdpType::kAnswer, nullptr); + _videoChannel = _channelManager->CreateVideoChannel(_call, cricket::MediaConfig(), std::string("video") + uint32ToString(mid), false, GroupNetworkManager::getDefaulCryptoOptions(), cricket::VideoOptions(), _videoBitrateAllocatorFactory.get()); + _videoChannel->SetRtpTransport(rtpTransport); + std::string error_desc; + _videoChannel->SetLocalContent(outgoingVideoDescription.get(), webrtc::SdpType::kOffer, error_desc); + _videoChannel->SetRemoteContent(incomingVideoDescription.get(), webrtc::SdpType::kAnswer, error_desc); _videoChannel->SetPayloadTypeDemuxingEnabled(false); _videoChannel->media_channel()->SetSink(_mainVideoSsrc, _videoSink.get()); @@ -1256,29 +1261,40 @@ public: } void mixAudio(int16_t *audio_samples, const size_t num_samples, const size_t num_channels, const uint32_t samples_per_sec) { - const auto numSamplesOut = num_samples * num_channels; - const auto numBytesOut = sizeof(int16_t) * numSamplesOut; - if (samples_per_sec != 48000) { - return; - } - - if (_buffer.size() < numSamplesOut) { - _buffer.resize(numSamplesOut); - } - _mutex.Lock(); const auto context = _streamingContext; _mutex.Unlock(); if (context) { - context->getAudio(_buffer.data(), num_samples, num_channels, samples_per_sec); - memcpy(audio_samples, _buffer.data(), numBytesOut); + if (_samplesToResample.size() < 480 * num_channels) { + _samplesToResample.resize(480 * num_channels); + } + memset(_samplesToResample.data(), 0, _samplesToResample.size() * sizeof(int16_t)); + + context->getAudio(_samplesToResample.data(), 480, num_channels, 48000); + + if (_resamplerFrequency != samples_per_sec || _resamplerNumChannels != num_channels) { + _resamplerFrequency = samples_per_sec; + _resamplerNumChannels = num_channels; + _resampler = std::make_unique(); + if (_resampler->Reset(48000, samples_per_sec, num_channels) == -1) { + _resampler = nullptr; + } + } + + if (_resampler) { + size_t outLen = 0; + _resampler->Push(_samplesToResample.data(), _samplesToResample.size(), (int16_t *)audio_samples, num_samples * num_channels, outLen); + } } } private: webrtc::Mutex _mutex; - std::vector _buffer; + std::unique_ptr _resampler; + uint32_t _resamplerFrequency = 0; + size_t _resamplerNumChannels = 0; + std::vector _samplesToResample; std::shared_ptr _streamingContext; }; @@ -1303,12 +1319,16 @@ public: const size_t bytes_per_sample, const size_t num_channels, const uint32_t samples_per_sec) override { - if (samples_per_sec != 48000) { - return; - } if (bytes_per_sample != num_channels * 2) { return; } + if (samples_per_sec % 100 != 0) { + return; + } + if (num_samples != samples_per_sec / 100) { + return; + } + if (_shared) { _shared->mixAudio((int16_t *)audio_samples, num_samples, num_channels, samples_per_sec); } @@ -1318,6 +1338,48 @@ private: std::shared_ptr _shared; }; +class CustomNetEqFactory: public webrtc::NetEqFactory { +public: + virtual ~CustomNetEqFactory() = default; + + std::unique_ptr CreateNetEq( + const webrtc::NetEq::Config& config, + const rtc::scoped_refptr& decoder_factory, webrtc::Clock* clock + ) const override { + webrtc::NetEq::Config updatedConfig = config; + updatedConfig.sample_rate_hz = 48000; + return 
webrtc::DefaultNetEqFactory().CreateNetEq(updatedConfig, decoder_factory, clock); + } +}; + +std::unique_ptr createNetEqFactory() { + return std::make_unique(); +} + +class CustomEchoDetector : public webrtc::EchoDetector { +public: + // (Re-)Initializes the submodule. + virtual void Initialize(int capture_sample_rate_hz, + int num_capture_channels, + int render_sample_rate_hz, + int num_render_channels) override { + } + + // Analysis (not changing) of the render signal. + virtual void AnalyzeRenderAudio(rtc::ArrayView render_audio) override { + } + + // Analysis (not changing) of the capture signal. + virtual void AnalyzeCaptureAudio( + rtc::ArrayView capture_audio) override { + } + + // Collect current metrics from the echo detector. + virtual Metrics GetMetrics() const override { + return webrtc::EchoDetector::Metrics(); + } +}; + } // namespace class GroupInstanceCustomInternal : public sigslot::has_slots<>, public std::enable_shared_from_this { @@ -1338,11 +1400,15 @@ public: _useDummyChannel(descriptor.useDummyChannel), _outgoingAudioBitrateKbit(descriptor.outgoingAudioBitrateKbit), _disableOutgoingAudioProcessing(descriptor.disableOutgoingAudioProcessing), +#ifdef WEBRTC_IOS + _disableAudioInput(descriptor.disableAudioInput), +#endif _minOutgoingVideoBitrateKbit(descriptor.minOutgoingVideoBitrateKbit), _videoContentType(descriptor.videoContentType), _videoCodecPreferences(std::move(descriptor.videoCodecPreferences)), _eventLog(std::make_unique()), _taskQueueFactory(webrtc::CreateDefaultTaskQueueFactory()), + _netEqFactory(createNetEqFactory()), _createAudioDeviceModule(descriptor.createAudioDeviceModule), _initialInputDeviceId(std::move(descriptor.initialInputDeviceId)), _initialOutputDeviceId(std::move(descriptor.initialOutputDeviceId)), @@ -1400,14 +1466,13 @@ public: "WebRTC-TaskQueuePacer/Enabled/" "WebRTC-VP8ConferenceTemporalLayers/1/" "WebRTC-Audio-MinimizeResamplingOnMobile/Enabled/" - //"WebRTC-MutedStateKillSwitch/Enabled/" - //"WebRTC-VP8IosMaxNumberOfThread/max_thread:1/" + "WebRTC-BweLossExperiment/Enabled/" ); _networkManager.reset(new ThreadLocalObject(_threads->getNetworkThread(), [weak, threads = _threads] () mutable { return new GroupNetworkManager( [=](const GroupNetworkManager::State &state) { - threads->getMediaThread()->PostTask(RTC_FROM_HERE, [=] { + threads->getMediaThread()->PostTask([=] { const auto strong = weak.lock(); if (!strong) { return; @@ -1419,28 +1484,28 @@ public: if (!isUnresolved) { return; } - threads->getMediaThread()->PostTask(RTC_FROM_HERE, [weak, message, isUnresolved]() mutable { + threads->getMediaThread()->PostTask([weak, message, isUnresolved]() mutable { if (const auto strong = weak.lock()) { strong->receivePacket(message, isUnresolved); } }); }, [=](bool isDataChannelOpen) { - threads->getMediaThread()->PostTask(RTC_FROM_HERE, [weak, isDataChannelOpen]() mutable { + threads->getMediaThread()->PostTask([weak, isDataChannelOpen]() mutable { if (const auto strong = weak.lock()) { strong->updateIsDataChannelOpen(isDataChannelOpen); } }); }, [=](std::string const &message) { - threads->getMediaThread()->PostTask(RTC_FROM_HERE, [weak, message]() { + threads->getMediaThread()->PostTask([weak, message]() { if (const auto strong = weak.lock()) { strong->receiveDataChannelMessage(message); } }); }, [=](uint32_t ssrc, uint8_t audioLevel, bool isSpeech) { - threads->getMediaThread()->PostTask(RTC_FROM_HERE, [weak, ssrc, audioLevel, isSpeech]() { + threads->getMediaThread()->PostTask([weak, ssrc, audioLevel, isSpeech]() { if (const auto strong = 
weak.lock()) { strong->updateSsrcAudioLevel(ssrc, audioLevel, isSpeech); } @@ -1456,7 +1521,7 @@ public: #if USE_RNNOISE audioProcessor = std::make_unique([weak, threads = _threads](GroupLevelValue const &level) { - threads->getMediaThread()->PostTask(RTC_FROM_HERE, [weak, level](){ + threads->getMediaThread()->PostTask([weak, level](){ auto strong = weak.lock(); if (!strong) { return; @@ -1477,7 +1542,7 @@ public: mediaDeps.audio_encoder_factory = webrtc::CreateAudioEncoderFactory(); mediaDeps.audio_decoder_factory = webrtc::CreateAudioDecoderFactory(); - mediaDeps.video_encoder_factory = PlatformInterface::SharedInstance()->makeVideoEncoderFactory(_platformContext); + mediaDeps.video_encoder_factory = PlatformInterface::SharedInstance()->makeVideoEncoderFactory(_platformContext, false, _videoContentType == VideoContentType::Screencast); mediaDeps.video_decoder_factory = PlatformInterface::SharedInstance()->makeVideoDecoderFactory(_platformContext); #if USE_RNNOISE @@ -1485,6 +1550,8 @@ public: webrtc::AudioProcessingBuilder builder; builder.SetCapturePostProcessing(std::move(audioProcessor)); + builder.SetEchoDetector(rtc::make_ref_counted()); + mediaDeps.audio_processing = builder.Create(); } #endif @@ -1514,10 +1581,11 @@ public: _threads->getWorkerThread()->Invoke(RTC_FROM_HERE, [this]() { webrtc::Call::Config callConfig(_eventLog.get(), _threads->getNetworkThread()); + callConfig.neteq_factory = _netEqFactory.get(); callConfig.task_queue_factory = _taskQueueFactory.get(); callConfig.trials = &_fieldTrials; callConfig.audio_state = _channelManager->media_engine()->voice().GetAudioState(); - _call.reset(webrtc::Call::Create(callConfig, _threads->getSharedModuleThread())); + _call.reset(webrtc::Call::Create(callConfig, webrtc::Clock::GetRealTimeClock(), _threads->getSharedModuleThread(), webrtc::ProcessThread::Create("PacerThread"))); }); _uniqueRandomIdGenerator.reset(new rtc::UniqueRandomIdGenerator()); @@ -1586,8 +1654,8 @@ public: if (_videoContentType == VideoContentType::Screencast) { videoOptions.is_screencast = true; } - _outgoingVideoChannel = _channelManager->CreateVideoChannel(_call.get(), cricket::MediaConfig(), _rtpTransport, _threads->getWorkerThread(), "1", false, GroupNetworkManager::getDefaulCryptoOptions(), _uniqueRandomIdGenerator.get(), videoOptions, _videoBitrateAllocatorFactory.get()); - + _outgoingVideoChannel = _channelManager->CreateVideoChannel(_call.get(), cricket::MediaConfig(), "1", false, GroupNetworkManager::getDefaulCryptoOptions(), videoOptions, _videoBitrateAllocatorFactory.get()); + _outgoingVideoChannel->SetRtpTransport(_rtpTransport); if (!_outgoingVideoChannel) { RTC_LOG(LS_ERROR) << "Could not create outgoing video channel."; return; @@ -1646,11 +1714,12 @@ public: incomingVideoDescription->set_codecs({ _selectedPayloadType->videoCodec, _selectedPayloadType->rtxCodec }); incomingVideoDescription->set_bandwidth(1300000); - _threads->getWorkerThread()->Invoke(RTC_FROM_HERE, [this, incomingVideoDescription, outgoingVideoDescription]() { - _outgoingVideoChannel->SetRemoteContent(incomingVideoDescription.get(), webrtc::SdpType::kAnswer, nullptr); - _outgoingVideoChannel->SetLocalContent(outgoingVideoDescription.get(), webrtc::SdpType::kOffer, nullptr); - _outgoingVideoChannel->SetPayloadTypeDemuxingEnabled(false); - }); + _threads->getWorkerThread()->Invoke(RTC_FROM_HERE, [&]() { + std::string error_desc; + _outgoingVideoChannel->SetRemoteContent(incomingVideoDescription.get(), webrtc::SdpType::kAnswer, error_desc); + 
_outgoingVideoChannel->SetLocalContent(outgoingVideoDescription.get(), webrtc::SdpType::kOffer, error_desc); + _outgoingVideoChannel->SetPayloadTypeDemuxingEnabled(false); + }); adjustVideoSendParams(); updateVideoSend(); @@ -1782,21 +1851,21 @@ public: audioOptions.auto_gain_control = false; audioOptions.highpass_filter = false; audioOptions.typing_detection = false; - audioOptions.experimental_agc = false; - audioOptions.experimental_ns = false; +// audioOptions.experimental_agc = false; +// audioOptions.experimental_ns = false; audioOptions.residual_echo_detector = false; } else { audioOptions.echo_cancellation = true; audioOptions.noise_suppression = true; - audioOptions.experimental_ns = true; +// audioOptions.experimental_ns = true; audioOptions.residual_echo_detector = true; } std::vector streamIds; streamIds.push_back("1"); - _outgoingAudioChannel = _channelManager->CreateVoiceChannel(_call.get(), cricket::MediaConfig(), _rtpTransport, _threads->getWorkerThread(), "0", false, GroupNetworkManager::getDefaulCryptoOptions(), _uniqueRandomIdGenerator.get(), audioOptions); - + _outgoingAudioChannel = _channelManager->CreateVoiceChannel(_call.get(), cricket::MediaConfig(), "0", false, GroupNetworkManager::getDefaulCryptoOptions(), audioOptions); + _outgoingAudioChannel->SetRtpTransport(_rtpTransport); const uint8_t opusMinBitrateKbps = _outgoingAudioBitrateKbit; const uint8_t opusMaxBitrateKbps = _outgoingAudioBitrateKbit; const uint8_t opusStartBitrateKbps = _outgoingAudioBitrateKbit; @@ -1831,9 +1900,10 @@ public: incomingAudioDescription->set_codecs({ opusCodec }); incomingAudioDescription->set_bandwidth(1300000); - _threads->getWorkerThread()->Invoke(RTC_FROM_HERE, [this, outgoingAudioDescription, incomingAudioDescription]() mutable { - _outgoingAudioChannel->SetLocalContent(outgoingAudioDescription.get(), webrtc::SdpType::kOffer, nullptr); - _outgoingAudioChannel->SetRemoteContent(incomingAudioDescription.get(), webrtc::SdpType::kAnswer, nullptr); + _threads->getWorkerThread()->Invoke(RTC_FROM_HERE, [&]() { + std::string error_desc; + _outgoingAudioChannel->SetLocalContent(outgoingAudioDescription.get(), webrtc::SdpType::kOffer, error_desc); + _outgoingAudioChannel->SetRemoteContent(incomingAudioDescription.get(), webrtc::SdpType::kAnswer, error_desc); _outgoingAudioChannel->SetPayloadTypeDemuxingEnabled(false); _outgoingAudioChannel->Enable(true); }); @@ -1879,7 +1949,7 @@ public: void beginLevelsTimer(int timeoutMs) { const auto weak = std::weak_ptr(shared_from_this()); - _threads->getMediaThread()->PostDelayedTask(RTC_FROM_HERE, [weak]() { + _threads->getMediaThread()->PostDelayedTask([weak]() { auto strong = weak.lock(); if (!strong) { return; @@ -1940,7 +2010,7 @@ public: void beginAudioChannelCleanupTimer(int delayMs) { const auto weak = std::weak_ptr(shared_from_this()); - _threads->getMediaThread()->PostDelayedTask(RTC_FROM_HERE, [weak]() { + _threads->getMediaThread()->PostDelayedTask([weak]() { auto strong = weak.lock(); if (!strong) { return; @@ -1969,7 +2039,7 @@ public: void beginRemoteConstraintsUpdateTimer(int delayMs) { const auto weak = std::weak_ptr(shared_from_this()); - _threads->getMediaThread()->PostDelayedTask(RTC_FROM_HERE, [weak]() { + _threads->getMediaThread()->PostDelayedTask([weak]() { auto strong = weak.lock(); if (!strong) { return; @@ -1983,7 +2053,7 @@ public: void beginNetworkStatusTimer(int delayMs) { const auto weak = std::weak_ptr(shared_from_this()); - _threads->getMediaThread()->PostDelayedTask(RTC_FROM_HERE, [weak]() { + 
_threads->getMediaThread()->PostDelayedTask([weak]() { auto strong = weak.lock(); if (!strong) { return; @@ -2137,11 +2207,9 @@ public: void OnRtcpPacketReceived_n(rtc::CopyOnWriteBuffer *buffer, int64_t packet_time_us) { rtc::CopyOnWriteBuffer packet = *buffer; - _threads->getWorkerThread()->PostTask(ToQueuedTask(_workerThreadSafery, [this, packet, packet_time_us] { - if (_call) { - _call->Receiver()->DeliverPacket(webrtc::MediaType::ANY, packet, packet_time_us); - } - })); + if (_call) { + _call->Receiver()->DeliverPacket(webrtc::MediaType::ANY, packet, packet_time_us); + } } void adjustBitratePreferences(bool resetStartBitrate) { @@ -2253,39 +2321,29 @@ public: } } - webrtc::RtpUtility::RtpHeaderParser rtpParser(packet.data(), packet.size()); - - webrtc::RTPHeader header; - if (rtpParser.RTCP()) { - if (!rtpParser.ParseRtcp(&header)) { - RTC_LOG(LS_INFO) << "Could not parse rtcp header"; - return; - } - + if (webrtc::IsRtcpPacket(packet)) { _threads->getWorkerThread()->Invoke(RTC_FROM_HERE, [this, packet]() { _call->Receiver()->DeliverPacket(webrtc::MediaType::ANY, packet, -1); }); } else { - if (!rtpParser.Parse(&header)) { - // Probably a data channel message + uint32_t ssrc = webrtc::ParseRtpSsrc(packet); + int payloadType = webrtc::ParseRtpPayloadType(packet); + + if (ssrc == _outgoingAudioSsrc) { return; } - if (header.ssrc == _outgoingAudioSsrc) { - return; - } - - auto ssrcInfo = _channelBySsrc.find(header.ssrc); + auto ssrcInfo = _channelBySsrc.find(ssrc); if (ssrcInfo == _channelBySsrc.end()) { // opus - if (header.payloadType == 111) { - maybeRequestUnknownSsrc(header.ssrc); - _missingPacketBuffer.add(header.ssrc, packet); + if (payloadType == 111) { + maybeRequestUnknownSsrc(ssrc); + _missingPacketBuffer.add(ssrc, packet); } } else { switch (ssrcInfo->second.type) { case ChannelSsrcInfo::Type::Audio: { - const auto it = _incomingAudioChannels.find(ChannelId(header.ssrc)); + const auto it = _incomingAudioChannels.find(ChannelId(ssrc)); if (it != _incomingAudioChannels.end()) { it->second->updateActivity(); } @@ -2304,7 +2362,7 @@ public: } void receiveRtcpPacket(rtc::CopyOnWriteBuffer const &packet, int64_t timestamp) { - _threads->getWorkerThread()->PostTask(RTC_FROM_HERE, [this, packet, timestamp]() { + _threads->getWorkerThread()->PostTask([this, packet, timestamp]() { _call->Receiver()->DeliverPacket(webrtc::MediaType::ANY, packet, timestamp); }); } @@ -2335,7 +2393,7 @@ public: _pendingOutgoingVideoConstraintRequestId += 1; const auto weak = std::weak_ptr(shared_from_this()); - _threads->getMediaThread()->PostDelayedTask(RTC_FROM_HERE, [weak, requestId]() { + _threads->getMediaThread()->PostDelayedTask([weak, requestId]() { auto strong = weak.lock(); if (!strong) { return; @@ -2435,7 +2493,7 @@ public: const auto weak = std::weak_ptr(shared_from_this()); auto task = _requestMediaChannelDescriptions(requestSsrcs, [weak, threads = _threads, requestId](std::vector &&descriptions) { - threads->getWorkerThread()->PostTask(RTC_FROM_HERE, [weak, requestId, descriptions = std::move(descriptions)]() mutable { + threads->getWorkerThread()->PostTask([weak, requestId, descriptions = std::move(descriptions)]() mutable { auto strong = weak.lock(); if (!strong) { return; @@ -2555,10 +2613,11 @@ public: }); } - void setConnectionMode(GroupConnectionMode connectionMode, bool keepBroadcastIfWasEnabled) { + void setConnectionMode(GroupConnectionMode connectionMode, bool keepBroadcastIfWasEnabled, bool isUnifiedBroadcast) { if (_connectionMode != connectionMode || connectionMode == 
GroupConnectionMode::GroupConnectionModeNone) { GroupConnectionMode previousMode = _connectionMode; _connectionMode = connectionMode; + _isUnifiedBroadcast = isUnifiedBroadcast; onConnectionModeUpdated(previousMode, keepBroadcastIfWasEnabled); } } @@ -2612,6 +2671,7 @@ public: const auto weak = std::weak_ptr(shared_from_this()); arguments.threads = _threads; arguments.platformContext = _platformContext; + arguments.isUnifiedBroadcast = _isUnifiedBroadcast; arguments.requestCurrentTime = _requestCurrentTime; arguments.requestAudioBroadcastPart = _requestAudioBroadcastPart; arguments.requestVideoBroadcastPart = _requestVideoBroadcastPart; @@ -2653,7 +2713,7 @@ public: break; } default: { - //RTC_FATAL() << "Unknown connectionMode"; + RTC_FATAL() << "Unknown connectionMode"; break; } } @@ -2920,6 +2980,14 @@ public: _noiseSuppressionConfiguration->isEnabled = isNoiseSuppressionEnabled; } + void addOutgoingVideoOutput(std::weak_ptr> sink) { + _videoCaptureSink->addSink(sink); + + if (_videoCapture) { + _videoCapture->setOutput(_videoCaptureSink); + } + } + void addIncomingVideoOutput(std::string const &endpointId, std::weak_ptr> sink) { if (_sharedVideoInformation && endpointId == _sharedVideoInformation->endpointId) { if (_videoCapture) { @@ -2945,7 +3013,7 @@ public: return; } - if (_incomingAudioChannels.size() > 5) { + if (_incomingAudioChannels.size() > 10) { auto timestamp = rtc::TimeMillis(); int64_t minActivity = INT64_MAX; @@ -2966,7 +3034,7 @@ public: removeIncomingAudioChannel(minActivityChannelId); } - if (_incomingAudioChannels.size() > 5) { + if (_incomingAudioChannels.size() > 10) { // Wait until there is a channel that hasn't been active in 1 second return; } @@ -2978,7 +3046,7 @@ public: if (ssrc.actualSsrc != ssrc.networkSsrc) { if (_audioLevelsUpdated) { onAudioSinkUpdate = [weak, ssrc = ssrc, threads = _threads](AudioSinkImpl::Update update) { - threads->getMediaThread()->PostTask(RTC_FROM_HERE, [weak, ssrc, update]() { + threads->getMediaThread()->PostTask([weak, ssrc, update]() { auto strong = weak.lock(); if (!strong) { return; @@ -3229,10 +3297,17 @@ public: private: rtc::scoped_refptr createAudioDeviceModule() { auto audioDeviceDataObserverShared = _audioDeviceDataObserverShared; +#ifdef WEBRTC_IOS + bool disableRecording = _disableAudioInput; +#endif const auto create = [&](webrtc::AudioDeviceModule::AudioLayer layer) { +#ifdef WEBRTC_IOS + return rtc::make_ref_counted(false, disableRecording); +#else return webrtc::AudioDeviceModule::Create( layer, _taskQueueFactory.get()); +#endif }; const auto check = [&](const rtc::scoped_refptr &result) -> rtc::scoped_refptr { if (!result) { @@ -3267,6 +3342,7 @@ private: private: std::shared_ptr _threads; GroupConnectionMode _connectionMode = GroupConnectionMode::GroupConnectionModeNone; + bool _isUnifiedBroadcast = false; std::function _networkStateUpdated; std::function _audioLevelsUpdated; @@ -3282,6 +3358,9 @@ private: bool _useDummyChannel{true}; int _outgoingAudioBitrateKbit{32}; bool _disableOutgoingAudioProcessing{false}; +#ifdef WEBRTC_IOS + bool _disableAudioInput{false}; +#endif int _minOutgoingVideoBitrateKbit{100}; VideoContentType _videoContentType{VideoContentType::None}; std::vector _videoCodecPreferences; @@ -3293,6 +3372,7 @@ private: std::unique_ptr _eventLog; std::unique_ptr _taskQueueFactory; + std::unique_ptr _netEqFactory; std::unique_ptr _mediaEngine; std::unique_ptr _call; webrtc::FieldTrialBasedConfig _fieldTrials; @@ -3401,9 +3481,9 @@ void GroupInstanceCustomImpl::stop() { }); } -void 
GroupInstanceCustomImpl::setConnectionMode(GroupConnectionMode connectionMode, bool keepBroadcastIfWasEnabled) { - _internal->perform(RTC_FROM_HERE, [connectionMode, keepBroadcastIfWasEnabled](GroupInstanceCustomInternal *internal) { - internal->setConnectionMode(connectionMode, keepBroadcastIfWasEnabled); +void GroupInstanceCustomImpl::setConnectionMode(GroupConnectionMode connectionMode, bool keepBroadcastIfWasEnabled, bool isUnifiedBroadcast) { + _internal->perform(RTC_FROM_HERE, [connectionMode, keepBroadcastIfWasEnabled, isUnifiedBroadcast](GroupInstanceCustomInternal *internal) { + internal->setConnectionMode(connectionMode, keepBroadcastIfWasEnabled, isUnifiedBroadcast); }); } @@ -3473,6 +3553,12 @@ void GroupInstanceCustomImpl::addExternalAudioSamples(std::vector &&sam }); } +void GroupInstanceCustomImpl::addOutgoingVideoOutput(std::weak_ptr> sink) { + _internal->perform(RTC_FROM_HERE, [sink](GroupInstanceCustomInternal *internal) mutable { + internal->addOutgoingVideoOutput(sink); + }); +} + void GroupInstanceCustomImpl::addIncomingVideoOutput(std::string const &endpointId, std::weak_ptr> sink) { _internal->perform(RTC_FROM_HERE, [endpointId, sink](GroupInstanceCustomInternal *internal) mutable { internal->addIncomingVideoOutput(endpointId, sink); diff --git a/TMessagesProj/jni/voip/tgcalls/group/GroupInstanceCustomImpl.h b/TMessagesProj/jni/voip/tgcalls/group/GroupInstanceCustomImpl.h index 41f75c02d..b497a7960 100644 --- a/TMessagesProj/jni/voip/tgcalls/group/GroupInstanceCustomImpl.h +++ b/TMessagesProj/jni/voip/tgcalls/group/GroupInstanceCustomImpl.h @@ -23,7 +23,7 @@ public: void stop(); - void setConnectionMode(GroupConnectionMode connectionMode, bool keepBroadcastIfWasEnabled); + void setConnectionMode(GroupConnectionMode connectionMode, bool keepBroadcastIfWasEnabled, bool isUnifiedBroadcast); void emitJoinPayload(std::function completion); void setJoinResponsePayload(std::string const &payload); @@ -38,6 +38,7 @@ public: void setAudioInputDevice(std::string id); void addExternalAudioSamples(std::vector &&samples); + void addOutgoingVideoOutput(std::weak_ptr> sink); void addIncomingVideoOutput(std::string const &endpointId, std::weak_ptr> sink); void setVolume(uint32_t ssrc, double volume); diff --git a/TMessagesProj/jni/voip/tgcalls/group/GroupInstanceImpl.h b/TMessagesProj/jni/voip/tgcalls/group/GroupInstanceImpl.h index a0c006421..83dd540f2 100644 --- a/TMessagesProj/jni/voip/tgcalls/group/GroupInstanceImpl.h +++ b/TMessagesProj/jni/voip/tgcalls/group/GroupInstanceImpl.h @@ -158,6 +158,7 @@ struct GroupInstanceDescriptor { std::function(std::shared_ptr, int64_t, int64_t, int32_t, VideoChannelDescription::Quality, std::function)> requestVideoBroadcastPart; int outgoingAudioBitrateKbit{32}; bool disableOutgoingAudioProcessing{false}; + bool disableAudioInput{false}; VideoContentType videoContentType{VideoContentType::None}; bool initialEnableNoiseSuppression{false}; std::vector videoCodecPreferences; @@ -179,7 +180,7 @@ public: virtual void stop() = 0; - virtual void setConnectionMode(GroupConnectionMode connectionMode, bool keepBroadcastIfWasEnabled) = 0; + virtual void setConnectionMode(GroupConnectionMode connectionMode, bool keepBroadcastIfWasEnabled, bool isUnifiedBroadcast) = 0; virtual void emitJoinPayload(std::function completion) = 0; virtual void setJoinResponsePayload(std::string const &payload) = 0; @@ -194,6 +195,7 @@ public: virtual void setAudioInputDevice(std::string id) = 0; virtual void addExternalAudioSamples(std::vector &&samples) = 0; + virtual void 
addOutgoingVideoOutput(std::weak_ptr> sink) = 0; virtual void addIncomingVideoOutput(std::string const &endpointId, std::weak_ptr> sink) = 0; virtual void setVolume(uint32_t ssrc, double volume) = 0; diff --git a/TMessagesProj/jni/voip/tgcalls/group/GroupNetworkManager.cpp b/TMessagesProj/jni/voip/tgcalls/group/GroupNetworkManager.cpp index 183a4d926..131ccb879 100644 --- a/TMessagesProj/jni/voip/tgcalls/group/GroupNetworkManager.cpp +++ b/TMessagesProj/jni/voip/tgcalls/group/GroupNetworkManager.cpp @@ -13,8 +13,8 @@ #include "p2p/base/dtls_transport_factory.h" #include "pc/dtls_srtp_transport.h" #include "pc/dtls_transport.h" +#include "modules/rtp_rtcp/source/rtp_util.h" #include "media/sctp/sctp_transport_factory.h" -#include "modules/rtp_rtcp/source/rtp_utility.h" #include "modules/rtp_rtcp/source/byte_io.h" #include "platform/PlatformInterface.h" #include "TurnCustomizerImpl.h" @@ -324,7 +324,7 @@ _audioActivityUpdated(audioActivityUpdated) { _networkMonitorFactory = PlatformInterface::SharedInstance()->createNetworkMonitorFactory(); - _socketFactory.reset(new rtc::BasicPacketSocketFactory(_threads->getNetworkThread())); + _socketFactory.reset(new rtc::BasicPacketSocketFactory(_threads->getNetworkThread()->socketserver())); _networkManager = std::make_unique(_networkMonitorFactory.get()); _asyncResolverFactory = std::make_unique(); @@ -362,9 +362,9 @@ void GroupNetworkManager::resetDtlsSrtpTransport() { _transportChannel.reset(new cricket::P2PTransportChannel("transport", 0, _portAllocator.get(), _asyncResolverFactory.get(), nullptr)); cricket::IceConfig iceConfig; - iceConfig.continual_gathering_policy = cricket::GATHER_ONCE; + iceConfig.continual_gathering_policy = cricket::GATHER_CONTINUALLY; iceConfig.prioritize_most_likely_candidate_pairs = true; - iceConfig.regather_on_failed_networks_interval = 8000; + iceConfig.regather_on_failed_networks_interval = 2000; _transportChannel->SetIceConfig(iceConfig); cricket::IceParameters localIceParameters( @@ -509,7 +509,7 @@ webrtc::RtpTransport *GroupNetworkManager::getRtpTransport() { void GroupNetworkManager::checkConnectionTimeout() { const auto weak = std::weak_ptr(shared_from_this()); - _threads->getNetworkThread()->PostDelayedTask(RTC_FROM_HERE, [weak]() { + _threads->getNetworkThread()->PostDelayedTask([weak]() { auto strong = weak.lock(); if (!strong) { return; @@ -553,7 +553,7 @@ void GroupNetworkManager::DtlsReadyToSend(bool isReadyToSend) { if (isReadyToSend) { const auto weak = std::weak_ptr(shared_from_this()); - _threads->getNetworkThread()->PostTask(RTC_FROM_HERE, [weak]() { + _threads->getNetworkThread()->PostTask([weak]() { const auto strong = weak.lock(); if (!strong) { return; diff --git a/TMessagesProj/jni/voip/tgcalls/group/GroupNetworkManager.h b/TMessagesProj/jni/voip/tgcalls/group/GroupNetworkManager.h index eacefae0b..059ca4785 100644 --- a/TMessagesProj/jni/voip/tgcalls/group/GroupNetworkManager.h +++ b/TMessagesProj/jni/voip/tgcalls/group/GroupNetworkManager.h @@ -11,6 +11,7 @@ #include "rtc_base/network_monitor_factory.h" #include "api/candidate.h" #include "media/base/media_channel.h" +#include "pc/sctp_transport.h" #include "rtc_base/ssl_fingerprint.h" #include "pc/sctp_data_channel.h" diff --git a/TMessagesProj/jni/voip/tgcalls/group/StreamingMediaContext.cpp b/TMessagesProj/jni/voip/tgcalls/group/StreamingMediaContext.cpp index eedd00098..660ce63b1 100644 --- a/TMessagesProj/jni/voip/tgcalls/group/StreamingMediaContext.cpp +++ b/TMessagesProj/jni/voip/tgcalls/group/StreamingMediaContext.cpp @@ -11,7 +11,7 @@ 
#include "rtc_base/synchronization/mutex.h" #include "common_audio/ring_buffer.h" #include "modules/audio_mixer/frame_combiner.h" -#include "modules/audio_processing/agc2/vad_with_level.h" +#include "modules/audio_processing/agc2/vad_wrapper.h" #include "modules/audio_processing/audio_buffer.h" #include "api/video/video_sink_interface.h" #include "audio/utility/audio_frame_operations.h" @@ -23,6 +23,9 @@ namespace { struct PendingAudioSegmentData { }; +struct PendingUnifiedSegmentData { +}; + struct PendingVideoSegmentData { int32_t channelId = 0; VideoChannelDescription::Quality quality = VideoChannelDescription::Quality::Thumbnail; @@ -42,7 +45,7 @@ struct PendingMediaSegmentPartResult { }; struct PendingMediaSegmentPart { - absl::variant typeData; + absl::variant typeData; int64_t minRequestTimestamp = 0; @@ -64,11 +67,21 @@ struct VideoSegment { std::shared_ptr pendingVideoQualityUpdatePart; }; +struct UnifiedSegment { + std::shared_ptr videoPart; + double lastFramePts = -1.0; + int _displayedFrames = 0; + bool isPlaying = false; +}; + struct MediaSegment { int64_t timestamp = 0; int64_t duration = 0; std::shared_ptr audio; + AudioStreamingPartPersistentDecoder audioDecoder; + std::shared_ptr unifiedAudio; std::vector> video; + std::vector> unified; }; class SampleRingBuffer { @@ -138,12 +151,12 @@ public: class CombinedVad { private: - std::unique_ptr _vadWithLevel; + webrtc::VoiceActivityDetectorWrapper _vadWithLevel; VadHistory _history; public: - CombinedVad() { - _vadWithLevel = std::make_unique(500, webrtc::GetAvailableCpuFeatures()); + CombinedVad() : + _vadWithLevel(500, webrtc::GetAvailableCpuFeatures(), webrtc::AudioProcessing::kSampleRate48kHz) { } ~CombinedVad() { @@ -153,7 +166,7 @@ public: if (buffer->num_channels() <= 0) { return _history.update(0.0f); } - webrtc::AudioFrameView frameView(buffer->channels(), buffer->num_channels(), buffer->num_frames()); + webrtc::AudioFrameView frameView(buffer->channels(), (int)(buffer->num_channels()), (int)(buffer->num_frames())); float peak = 0.0f; for (const auto &x : frameView.channel(0)) { peak = std::max(std::fabs(x), peak); @@ -162,9 +175,9 @@ public: return _history.update(false); } - auto result = _vadWithLevel->AnalyzeFrame(frameView); + auto result = _vadWithLevel.Analyze(frameView); - return _history.update(result.speech_probability); + return _history.update(result); } bool update() { @@ -226,6 +239,7 @@ class StreamingMediaContextPrivate : public std::enable_shared_from_this(shared_from_this()); - _threads->getMediaThread()->PostDelayedTask(RTC_FROM_HERE, [weak]() { + _threads->getMediaThread()->PostDelayedTask([weak]() { auto strong = weak.lock(); if (!strong) { return; @@ -308,16 +322,38 @@ public: } } + for (auto &videoSegment : segment->unified) { + videoSegment->isPlaying = true; + + auto frame = videoSegment->videoPart->getFrameAtRelativeTimestamp(relativeTimestamp); + if (frame) { + if (videoSegment->lastFramePts != frame->pts) { + videoSegment->lastFramePts = frame->pts; + videoSegment->_displayedFrames += 1; + + auto sinkList = _videoSinks.find("unified"); + if (sinkList != _videoSinks.end()) { + for (const auto &weakSink : sinkList->second) { + auto sink = weakSink.lock(); + if (sink) { + sink->OnFrame(frame->frame); + } + } + } + } + } + } + if (segment->audio) { const auto available = [&] { _audioDataMutex.Lock(); - const auto result = (_audioRingBuffer.availableForWriting() >= 480); + const auto result = (_audioRingBuffer.availableForWriting() >= 480 * _audioRingBufferNumChannels); _audioDataMutex.Unlock(); 
return result; }; while (available()) { - auto audioChannels = segment->audio->get10msPerChannel(); + auto audioChannels = segment->audio->get10msPerChannel(segment->audioDecoder); if (audioChannels.empty()) { break; } @@ -348,7 +384,90 @@ public: } _audioDataMutex.Lock(); - _audioRingBuffer.write(frameOut.data(), frameOut.samples_per_channel()); + if (frameOut.num_channels() == _audioRingBufferNumChannels) { + _audioRingBuffer.write(frameOut.data(), frameOut.samples_per_channel() * frameOut.num_channels()); + } else { + if (_stereoShuffleBuffer.size() < frameOut.samples_per_channel() * _audioRingBufferNumChannels) { + _stereoShuffleBuffer.resize(frameOut.samples_per_channel() * _audioRingBufferNumChannels); + } + for (int i = 0; i < frameOut.samples_per_channel(); i++) { + for (int j = 0; j < _audioRingBufferNumChannels; j++) { + _stereoShuffleBuffer[i * _audioRingBufferNumChannels + j] = frameOut.data()[i]; + } + } + _audioRingBuffer.write(_stereoShuffleBuffer.data(), frameOut.samples_per_channel() * _audioRingBufferNumChannels); + } + _audioDataMutex.Unlock(); + } + } else if (segment->unifiedAudio) { + const auto available = [&] { + _audioDataMutex.Lock(); + const auto result = (_audioRingBuffer.availableForWriting() >= 480); + _audioDataMutex.Unlock(); + + return result; + }; + while (available()) { + auto audioChannels = segment->unifiedAudio->getAudio10msPerChannel(_persistentAudioDecoder); + if (audioChannels.empty()) { + break; + } + + if (audioChannels[0].numSamples < 480) { + RTC_LOG(LS_INFO) << "render: got less than 10ms of audio data (" << audioChannels[0].numSamples << " samples)"; + } + + int numChannels = std::min(2, (int)audioChannels.size()); + + webrtc::AudioFrame frameOut; + + if (numChannels == 1) { + frameOut.UpdateFrame(0, audioChannels[0].pcmData.data(), audioChannels[0].pcmData.size(), 48000, webrtc::AudioFrame::SpeechType::kNormalSpeech, webrtc::AudioFrame::VADActivity::kVadActive, numChannels); + } else { + bool skipFrame = false; + int numSamples = (int)audioChannels[0].pcmData.size(); + for (int i = 1; i < numChannels; i++) { + if (audioChannels[i].pcmData.size() != numSamples) { + skipFrame = true; + break; + } + } + if (skipFrame) { + break; + } + if (_stereoShuffleBuffer.size() < numChannels * numSamples) { + _stereoShuffleBuffer.resize(numChannels * numSamples); + } + for (int i = 0; i < numSamples; i++) { + for (int j = 0; j < numChannels; j++) { + _stereoShuffleBuffer[i * numChannels + j] = audioChannels[0].pcmData[i]; + } + } + frameOut.UpdateFrame(0, _stereoShuffleBuffer.data(), numSamples, 48000, webrtc::AudioFrame::SpeechType::kNormalSpeech, webrtc::AudioFrame::VADActivity::kVadActive, numChannels); + } + + auto volumeIt = _volumeBySsrc.find(1); + if (volumeIt != _volumeBySsrc.end()) { + double outputGain = volumeIt->second; + if (outputGain < 0.99f || outputGain > 1.01f) { + webrtc::AudioFrameOperations::ScaleWithSat(outputGain, &frameOut); + } + } + + _audioDataMutex.Lock(); + if (frameOut.num_channels() == _audioRingBufferNumChannels) { + _audioRingBuffer.write(frameOut.data(), frameOut.samples_per_channel() * frameOut.num_channels()); + } else { + if (_stereoShuffleBuffer.size() < frameOut.samples_per_channel() * _audioRingBufferNumChannels) { + _stereoShuffleBuffer.resize(frameOut.samples_per_channel() * _audioRingBufferNumChannels); + } + for (int i = 0; i < frameOut.samples_per_channel(); i++) { + for (int j = 0; j < _audioRingBufferNumChannels; j++) { + _stereoShuffleBuffer[i * _audioRingBufferNumChannels + j] = frameOut.data()[i]; + } + } + 
_audioRingBuffer.write(_stereoShuffleBuffer.data(), frameOut.samples_per_channel() * _audioRingBufferNumChannels); + } _audioDataMutex.Unlock(); } } @@ -397,31 +516,31 @@ public: _updateAudioLevel(ssrc, vadResult.first, vadResult.second); } - void getAudio(int16_t *audio_samples, const size_t num_samples, const size_t num_channels, const uint32_t samples_per_sec) { + void getAudio(int16_t *audio_samples, size_t num_samples, size_t num_channels, uint32_t samples_per_sec) { int16_t *buffer = nullptr; - if (num_channels == 1) { + if (num_channels == _audioRingBufferNumChannels) { buffer = audio_samples; } else { - if (_tempAudioBuffer.size() < num_samples) { - _tempAudioBuffer.resize(num_samples); + if (_tempAudioBuffer.size() < num_samples * _audioRingBufferNumChannels) { + _tempAudioBuffer.resize(num_samples * _audioRingBufferNumChannels); } buffer = _tempAudioBuffer.data(); } _audioDataMutex.Lock(); - size_t readSamples = _audioRingBuffer.read(buffer, num_samples); + size_t readSamples = _audioRingBuffer.read(buffer, num_samples * _audioRingBufferNumChannels); _audioDataMutex.Unlock(); - if (num_channels != 1) { - for (size_t sampleIndex = 0; sampleIndex < readSamples; sampleIndex++) { + if (num_channels != _audioRingBufferNumChannels) { + for (size_t sampleIndex = 0; sampleIndex < readSamples / _audioRingBufferNumChannels; sampleIndex++) { for (size_t channelIndex = 0; channelIndex < num_channels; channelIndex++) { - audio_samples[sampleIndex * num_channels + channelIndex] = _tempAudioBuffer[sampleIndex]; + audio_samples[sampleIndex * num_channels + channelIndex] = _tempAudioBuffer[sampleIndex * _audioRingBufferNumChannels + 0]; } } } - if (readSamples < num_samples) { - memset(audio_samples + readSamples * num_channels, 0, (num_samples - readSamples) * num_channels * sizeof(int16_t)); + if (readSamples < num_samples * num_channels) { + memset(audio_samples + readSamples, 0, (num_samples * num_channels - readSamples) * sizeof(int16_t)); } } @@ -449,10 +568,49 @@ public: void requestSegmentsIfNeeded() { while (true) { - if (_nextSegmentTimestamp == 0) { - if (_pendingSegments.size() >= 1) { - break; + if (_nextSegmentTimestamp == -1) { + if (!_pendingRequestTimeTask && _pendingRequestTimeDelayTaskId == 0) { + const auto weak = std::weak_ptr(shared_from_this()); + _pendingRequestTimeTask = _requestCurrentTime([weak, threads = _threads](int64_t timestamp) { + threads->getMediaThread()->PostTask([weak, timestamp]() { + auto strong = weak.lock(); + if (!strong) { + return; + } + + strong->_pendingRequestTimeTask.reset(); + + int64_t adjustedTimestamp = 0; + if (timestamp > 0) { + adjustedTimestamp = (int64_t)((timestamp / strong->_segmentDuration * strong->_segmentDuration) - strong->_segmentBufferDuration); + } + + if (adjustedTimestamp <= 0) { + int taskId = strong->_nextPendingRequestTimeDelayTaskId; + strong->_pendingRequestTimeDelayTaskId = taskId; + strong->_nextPendingRequestTimeDelayTaskId++; + + strong->_threads->getMediaThread()->PostDelayedTask([weak, taskId]() { + auto strong = weak.lock(); + if (!strong) { + return; + } + if (strong->_pendingRequestTimeDelayTaskId != taskId) { + return; + } + + strong->_pendingRequestTimeDelayTaskId = 0; + + strong->requestSegmentsIfNeeded(); + }, 1000); + } else { + strong->_nextSegmentTimestamp = adjustedTimestamp; + strong->requestSegmentsIfNeeded(); + } + }); + }); } + break; } else { int64_t availableAndRequestedSegmentsDuration = 0; availableAndRequestedSegmentsDuration += getAvailableBufferDuration(); @@ -466,12 +624,16 @@ public: auto 
pendingSegment = std::make_shared(); pendingSegment->timestamp = _nextSegmentTimestamp; - if (_nextSegmentTimestamp != 0) { + if (_nextSegmentTimestamp != -1) { _nextSegmentTimestamp += _segmentDuration; } auto audio = std::make_shared(); - audio->typeData = PendingAudioSegmentData(); + if (_isUnifiedBroadcast) { + audio->typeData = PendingUnifiedSegmentData(); + } else { + audio->typeData = PendingAudioSegmentData(); + } audio->minRequestTimestamp = 0; pendingSegment->parts.push_back(audio); @@ -491,7 +653,7 @@ public: _pendingSegments.push_back(pendingSegment); - if (_nextSegmentTimestamp == 0) { + if (_nextSegmentTimestamp == -1) { break; } } @@ -556,7 +718,7 @@ public: auto result = strongSegment->pendingVideoQualityUpdatePart->result; if (result) { - strongSegment->part = std::make_shared(std::move(result->data)); + strongSegment->part = std::make_shared(std::move(result->data), VideoStreamingPart::ContentType::Video); } strongSegment->pendingVideoQualityUpdatePart.reset(); @@ -597,9 +759,6 @@ public: if (!part->result && !part->task) { if (part->minRequestTimestamp != 0) { - if (i != 0) { - continue; - } if (part->minRequestTimestamp > absoluteTimestamp) { minDelayedRequestTimeout = std::min(minDelayedRequestTimeout, part->minRequestTimestamp - absoluteTimestamp); @@ -611,7 +770,7 @@ public: const auto weakPart = std::weak_ptr(part); std::function handleResult = [weak, weakSegment, weakPart, threads = _threads, segmentTimestamp](BroadcastPart &&part) { - threads->getMediaThread()->PostTask(RTC_FROM_HERE, [weak, weakSegment, weakPart, part = std::move(part), segmentTimestamp]() mutable { + threads->getMediaThread()->PostTask([weak, weakSegment, weakPart, part = std::move(part), segmentTimestamp]() mutable { auto strong = weak.lock(); if (!strong) { return; @@ -631,14 +790,14 @@ public: switch (part.status) { case BroadcastPart::Status::Success: { pendingPart->result = std::make_shared(std::move(part.data)); - if (strong->_nextSegmentTimestamp == 0) { + if (strong->_nextSegmentTimestamp == -1) { strong->_nextSegmentTimestamp = part.timestampMilliseconds + strong->_segmentDuration; } strong->checkPendingSegments(); break; } case BroadcastPart::Status::NotReady: { - if (segmentTimestamp == 0) { + if (segmentTimestamp == 0 && !strong->_isUnifiedBroadcast) { int64_t responseTimestampMilliseconds = (int64_t)(part.responseTimestamp * 1000.0); int64_t responseTimestampBoundary = (responseTimestampMilliseconds / strong->_segmentDuration) * strong->_segmentDuration; @@ -653,10 +812,15 @@ public: break; } case BroadcastPart::Status::ResyncNeeded: { - int64_t responseTimestampMilliseconds = (int64_t)(part.responseTimestamp * 1000.0); - int64_t responseTimestampBoundary = (responseTimestampMilliseconds / strong->_segmentDuration) * strong->_segmentDuration; + if (strong->_isUnifiedBroadcast) { + strong->_nextSegmentTimestamp = -1; + } else { + int64_t responseTimestampMilliseconds = (int64_t)(part.responseTimestamp * 1000.0); + int64_t responseTimestampBoundary = (responseTimestampMilliseconds / strong->_segmentDuration) * strong->_segmentDuration; + + strong->_nextSegmentTimestamp = responseTimestampBoundary; + } - strong->_nextSegmentTimestamp = responseTimestampBoundary; strong->discardAllPendingSegments(); strong->requestSegmentsIfNeeded(); strong->checkPendingSegments(); @@ -676,6 +840,8 @@ public: part->task = _requestAudioBroadcastPart(_platformContext, segmentTimestamp, _segmentDuration, handleResult); } else if (const auto videoData = absl::get_if(typeData)) { part->task = 
_requestVideoBroadcastPart(_platformContext, segmentTimestamp, _segmentDuration, videoData->channelId, videoData->quality, handleResult); + } else if (const auto unifiedData = absl::get_if(typeData)) { + part->task = _requestVideoBroadcastPart(_platformContext, segmentTimestamp, _segmentDuration, 1, VideoChannelDescription::Quality::Full, handleResult); } } } @@ -687,7 +853,7 @@ public: for (auto &part : pendingSegment->parts) { const auto typeData = &part->typeData; if (const auto audioData = absl::get_if(typeData)) { - segment->audio = std::make_shared(std::move(part->result->data)); + segment->audio = std::make_shared(std::move(part->result->data), "ogg", false); _currentEndpointMapping = segment->audio->getEndpointMapping(); } else if (const auto videoData = absl::get_if(typeData)) { auto videoSegment = std::make_shared(); @@ -695,8 +861,17 @@ public: if (part->result->data.empty()) { RTC_LOG(LS_INFO) << "Video part " << segment->timestamp << " is empty"; } - videoSegment->part = std::make_shared(std::move(part->result->data)); + videoSegment->part = std::make_shared(std::move(part->result->data), VideoStreamingPart::ContentType::Video); segment->video.push_back(videoSegment); + } else if (const auto videoData = absl::get_if(typeData)) { + auto unifiedSegment = std::make_shared(); + if (part->result->data.empty()) { + RTC_LOG(LS_INFO) << "Unified part " << segment->timestamp << " is empty"; + } + std::vector dataCopy = part->result->data; + unifiedSegment->videoPart = std::make_shared(std::move(part->result->data), VideoStreamingPart::ContentType::Video); + segment->unified.push_back(unifiedSegment); + segment->unifiedAudio = std::make_shared(std::move(dataCopy), VideoStreamingPart::ContentType::Audio); } } _availableSegments.push_back(segment); @@ -710,7 +885,7 @@ public: if (minDelayedRequestTimeout < INT32_MAX) { const auto weak = std::weak_ptr(shared_from_this()); - _threads->getMediaThread()->PostDelayedTask(RTC_FROM_HERE, [weak]() { + _threads->getMediaThread()->PostDelayedTask([weak]() { auto strong = weak.lock(); if (!strong) { return; @@ -729,7 +904,7 @@ public: const auto weakPart = std::weak_ptr(part); std::function handleResult = [weak, weakPart, threads = _threads, completion](BroadcastPart &&part) { - threads->getMediaThread()->PostTask(RTC_FROM_HERE, [weak, weakPart, part = std::move(part), completion]() mutable { + threads->getMediaThread()->PostTask([weak, weakPart, part = std::move(part), completion]() mutable { auto strong = weak.lock(); if (!strong) { return; @@ -768,6 +943,8 @@ public: part->task = _requestAudioBroadcastPart(_platformContext, segmentTimestamp, _segmentDuration, handleResult); } else if (const auto videoData = absl::get_if(typeData)) { part->task = _requestVideoBroadcastPart(_platformContext, segmentTimestamp, _segmentDuration, videoData->channelId, videoData->quality, handleResult); + } else if (const auto unifiedData = absl::get_if(typeData)) { + part->task = _requestVideoBroadcastPart(_platformContext, segmentTimestamp, _segmentDuration, 1, VideoChannelDescription::Quality::Full, handleResult); } } @@ -776,15 +953,10 @@ public: } void setActiveVideoChannels(std::vector const &videoChannels) { - _activeVideoChannels = videoChannels; - -/*#if DEBUG - for (auto &updatedVideoChannel : _activeVideoChannels) { - if (updatedVideoChannel.quality == VideoChannelDescription::Quality::Medium) { - updatedVideoChannel.quality = VideoChannelDescription::Quality::Thumbnail; - } + if (_isUnifiedBroadcast) { + return; } -#endif*/ + _activeVideoChannels = 
videoChannels; for (const auto &updatedVideoChannel : _activeVideoChannels) { for (const auto &segment : _availableSegments) { @@ -809,6 +981,7 @@ public: private: std::shared_ptr _threads; + bool _isUnifiedBroadcast = false; std::function(std::function)> _requestCurrentTime; std::function(std::shared_ptr, int64_t, int64_t, std::function)> _requestAudioBroadcastPart; std::function(std::shared_ptr, int64_t, int64_t, int32_t, VideoChannelDescription::Quality, std::function)> _requestVideoBroadcastPart; @@ -817,19 +990,26 @@ private: const int _segmentDuration = 1000; const int _segmentBufferDuration = 2000; - int64_t _nextSegmentTimestamp = 0; + int64_t _nextSegmentTimestamp = -1; absl::optional _waitForBufferredMillisecondsBeforeRendering; std::vector> _availableSegments; + AudioStreamingPartPersistentDecoder _persistentAudioDecoder; + + std::shared_ptr _pendingRequestTimeTask; + int _pendingRequestTimeDelayTaskId = 0; + int _nextPendingRequestTimeDelayTaskId = 0; std::vector> _pendingSegments; int64_t _playbackReferenceTimestamp = 0; - const size_t _audioDataRingBufferMaxSize = 4800; + const int _audioRingBufferNumChannels = 2; + const size_t _audioDataRingBufferMaxSize = 4800 * 2; webrtc::Mutex _audioDataMutex; SampleRingBuffer _audioRingBuffer; std::vector _tempAudioBuffer; + std::vector _stereoShuffleBuffer; webrtc::FrameCombiner _audioFrameCombiner; std::map> _audioVadMap; @@ -838,7 +1018,7 @@ private: std::map>>> _videoSinks; std::map _currentEndpointMapping; - + std::shared_ptr _platformContext; }; diff --git a/TMessagesProj/jni/voip/tgcalls/group/StreamingMediaContext.h b/TMessagesProj/jni/voip/tgcalls/group/StreamingMediaContext.h index 928eddcc8..a8f5c7aab 100644 --- a/TMessagesProj/jni/voip/tgcalls/group/StreamingMediaContext.h +++ b/TMessagesProj/jni/voip/tgcalls/group/StreamingMediaContext.h @@ -24,6 +24,7 @@ public: public: struct StreamingMediaContextArguments { std::shared_ptr threads; + bool isUnifiedBroadcast = false; std::function(std::function)> requestCurrentTime; std::function(std::shared_ptr, int64_t, int64_t, std::function)> requestAudioBroadcastPart; std::function(std::shared_ptr, int64_t, int64_t, int32_t, VideoChannelDescription::Quality, std::function)> requestVideoBroadcastPart; diff --git a/TMessagesProj/jni/voip/tgcalls/group/VideoStreamingPart.cpp b/TMessagesProj/jni/voip/tgcalls/group/VideoStreamingPart.cpp index 591a523e0..84ecad22a 100644 --- a/TMessagesProj/jni/voip/tgcalls/group/VideoStreamingPart.cpp +++ b/TMessagesProj/jni/voip/tgcalls/group/VideoStreamingPart.cpp @@ -4,11 +4,7 @@ #include "rtc_base/third_party/base64/base64.h" #include "api/video/i420_buffer.h" -extern "C" { -#include -#include -#include -} +#include "AVIOContextImpl.h" #include #include @@ -18,63 +14,6 @@ namespace tgcalls { namespace { -class AVIOContextImpl { -public: - AVIOContextImpl(std::vector &&fileData) : - _fileData(std::move(fileData)) { - _buffer.resize(4 * 1024); - _context = avio_alloc_context(_buffer.data(), (int)_buffer.size(), 0, this, &AVIOContextImpl::read, NULL, &AVIOContextImpl::seek); - } - - ~AVIOContextImpl() { - av_free(_context); - } - - static int read(void *opaque, unsigned char *buffer, int bufferSize) { - AVIOContextImpl *instance = static_cast(opaque); - - int bytesToRead = std::min(bufferSize, ((int)instance->_fileData.size()) - instance->_fileReadPosition); - if (bytesToRead < 0) { - bytesToRead = 0; - } - - if (bytesToRead > 0) { - memcpy(buffer, instance->_fileData.data() + instance->_fileReadPosition, bytesToRead); - instance->_fileReadPosition += 
bytesToRead; - - return bytesToRead; - } else { - return AVERROR_EOF; - } - } - - static int64_t seek(void *opaque, int64_t offset, int whence) { - AVIOContextImpl *instance = static_cast(opaque); - - if (whence == 0x10000) { - return (int64_t)instance->_fileData.size(); - } else { - int64_t seekOffset = std::min(offset, (int64_t)instance->_fileData.size()); - if (seekOffset < 0) { - seekOffset = 0; - } - instance->_fileReadPosition = (int)seekOffset; - return seekOffset; - } - } - - AVIOContext *getContext() { - return _context; - } - -private: - std::vector _fileData; - int _fileReadPosition = 0; - - std::vector _buffer; - AVIOContext *_context = nullptr; -}; - class MediaDataPacket { public: MediaDataPacket() : _packet(av_packet_alloc()) { @@ -312,8 +251,12 @@ absl::optional consumeVideoStreamInfo(std::vector &dat } if (const auto eventCount = readInt32(data, offset)) { - if (const auto event = readVideoStreamEvent(data, offset)) { - info.events.push_back(event.value()); + if (eventCount > 0) { + if (const auto event = readVideoStreamEvent(data, offset)) { + info.events.push_back(event.value()); + } else { + return absl::nullopt; + } } else { return absl::nullopt; } @@ -499,7 +442,7 @@ public: _frameIndex++; return convertedFrame; } - } else if (status == -35) { + } else if (status == AVERROR(EAGAIN)) { // more data needed } else { _didReadToEnd = true; @@ -552,14 +495,29 @@ private: class VideoStreamingPartState { public: - VideoStreamingPartState(std::vector &&data) { + VideoStreamingPartState(std::vector &&data, VideoStreamingPart::ContentType contentType) { _videoStreamInfo = consumeVideoStreamInfo(data); if (!_videoStreamInfo) { return; } for (size_t i = 0; i < _videoStreamInfo->events.size(); i++) { - std::vector dataSlice(data.begin() + _videoStreamInfo->events[i].offset, i == (_videoStreamInfo->events.size() - 1) ? 
data.end() : (data.begin() + _videoStreamInfo->events[i + 1].offset)); + if (_videoStreamInfo->events[i].offset < 0) { + continue; + } + size_t endOffset = 0; + if (i == _videoStreamInfo->events.size() - 1) { + endOffset = data.size(); + } else { + endOffset = _videoStreamInfo->events[i + 1].offset; + } + if (endOffset <= _videoStreamInfo->events[i].offset) { + continue; + } + if (endOffset > data.size()) { + continue; + } + std::vector dataSlice(data.begin() + _videoStreamInfo->events[i].offset, data.begin() + endOffset); webrtc::VideoRotation rotation = webrtc::VideoRotation::kVideoRotation_0; switch (_videoStreamInfo->events[i].rotation) { case 0: { @@ -582,8 +540,24 @@ public: break; } } - auto part = std::make_unique(_videoStreamInfo->events[i].endpointId, rotation, std::move(dataSlice), _videoStreamInfo->container); - _parsedParts.push_back(std::move(part)); + + switch (contentType) { + case VideoStreamingPart::ContentType::Audio: { + auto part = std::make_unique(std::move(dataSlice), _videoStreamInfo->container, true); + _parsedAudioParts.push_back(std::move(part)); + + break; + } + case VideoStreamingPart::ContentType::Video: { + auto part = std::make_unique(_videoStreamInfo->events[i].endpointId, rotation, std::move(dataSlice), _videoStreamInfo->container); + _parsedVideoParts.push_back(std::move(part)); + + break; + } + default: { + break; + } + } } } @@ -593,13 +567,13 @@ public: absl::optional getFrameAtRelativeTimestamp(double timestamp) { while (true) { if (!_currentFrame) { - if (!_parsedParts.empty()) { - auto result = _parsedParts[0]->getNextFrame(); + if (!_parsedVideoParts.empty()) { + auto result = _parsedVideoParts[0]->getNextFrame(); if (result) { _currentFrame = result; _relativeTimestamp += result->duration; } else { - _parsedParts.erase(_parsedParts.begin()); + _parsedVideoParts.erase(_parsedVideoParts.begin()); continue; } } @@ -618,23 +592,49 @@ public: } absl::optional getActiveEndpointId() const { - if (!_parsedParts.empty()) { - return _parsedParts[0]->endpointId(); + if (!_parsedVideoParts.empty()) { + return _parsedVideoParts[0]->endpointId(); } else { return absl::nullopt; } } + int getAudioRemainingMilliseconds() { + while (!_parsedAudioParts.empty()) { + auto firstPartResult = _parsedAudioParts[0]->getRemainingMilliseconds(); + if (firstPartResult <= 0) { + _parsedAudioParts.erase(_parsedAudioParts.begin()); + } else { + return firstPartResult; + } + } + return 0; + } + + std::vector getAudio10msPerChannel(AudioStreamingPartPersistentDecoder &persistentDecoder) { + while (!_parsedAudioParts.empty()) { + auto firstPartResult = _parsedAudioParts[0]->get10msPerChannel(persistentDecoder); + if (firstPartResult.empty()) { + _parsedAudioParts.erase(_parsedAudioParts.begin()); + } else { + return firstPartResult; + } + } + return {}; + } + private: absl::optional _videoStreamInfo; - std::vector> _parsedParts; + std::vector> _parsedVideoParts; absl::optional _currentFrame; double _relativeTimestamp = 0.0; + + std::vector> _parsedAudioParts; }; -VideoStreamingPart::VideoStreamingPart(std::vector &&data) { +VideoStreamingPart::VideoStreamingPart(std::vector &&data, VideoStreamingPart::ContentType contentType) { if (!data.empty()) { - _state = new VideoStreamingPartState(std::move(data)); + _state = new VideoStreamingPartState(std::move(data), contentType); } } @@ -656,4 +656,15 @@ absl::optional VideoStreamingPart::getActiveEndpointId() const { : absl::nullopt; } +int VideoStreamingPart::getAudioRemainingMilliseconds() { + return _state + ? 
_state->getAudioRemainingMilliseconds() : 0; +} +std::vector VideoStreamingPart::getAudio10msPerChannel(AudioStreamingPartPersistentDecoder &persistentDecoder) { + return _state ? _state->getAudio10msPerChannel(persistentDecoder) : std::vector(); +} + } diff --git a/TMessagesProj/jni/voip/tgcalls/group/VideoStreamingPart.h b/TMessagesProj/jni/voip/tgcalls/group/VideoStreamingPart.h index 330b1fdc0..87f7f15d1 100644 --- a/TMessagesProj/jni/voip/tgcalls/group/VideoStreamingPart.h +++ b/TMessagesProj/jni/voip/tgcalls/group/VideoStreamingPart.h @@ -8,6 +8,9 @@ #include "api/video/video_frame.h" #include "absl/types/optional.h" +#include "AudioStreamingPart.h" +#include "AudioStreamingPartInternal.h" + namespace tgcalls { class VideoStreamingPartState; @@ -30,7 +33,13 @@ struct VideoStreamingPartFrame { class VideoStreamingPart { public: - explicit VideoStreamingPart(std::vector<uint8_t> &&data); + enum class ContentType { + Audio, + Video + }; + +public: + explicit VideoStreamingPart(std::vector<uint8_t> &&data, VideoStreamingPart::ContentType contentType); ~VideoStreamingPart(); VideoStreamingPart(const VideoStreamingPart&) = delete; @@ -44,6 +53,9 @@ public: absl::optional<VideoStreamingPartFrame> getFrameAtRelativeTimestamp(double timestamp); absl::optional<std::string> getActiveEndpointId() const; + int getAudioRemainingMilliseconds(); + std::vector getAudio10msPerChannel(AudioStreamingPartPersistentDecoder &persistentDecoder); + private: VideoStreamingPartState *_state = nullptr; }; diff --git a/TMessagesProj/jni/voip/tgcalls/platform/PlatformInterface.h b/TMessagesProj/jni/voip/tgcalls/platform/PlatformInterface.h index e3432d089..b1a588413 100644 --- a/TMessagesProj/jni/voip/tgcalls/platform/PlatformInterface.h +++ b/TMessagesProj/jni/voip/tgcalls/platform/PlatformInterface.h @@ -303,8 +303,8 @@ public: virtual std::unique_ptr<webrtc::NetworkMonitorFactory> createNetworkMonitorFactory() { return nullptr; } - - virtual std::unique_ptr<webrtc::VideoEncoderFactory> makeVideoEncoderFactory(std::shared_ptr<PlatformContext> platformContext) = 0; + + virtual std::unique_ptr<webrtc::VideoEncoderFactory> makeVideoEncoderFactory(std::shared_ptr<PlatformContext> platformContext, bool preferHardwareEncoding = false, bool isScreencast = false) = 0; virtual std::unique_ptr<webrtc::VideoDecoderFactory> makeVideoDecoderFactory(std::shared_ptr<PlatformContext> platformContext) = 0; virtual bool supportsEncoding(const std::string &codecName, std::shared_ptr<PlatformContext> platformContext) = 0; virtual rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> makeVideoSource(rtc::Thread *signalingThread, rtc::Thread *workerThread, bool screencapture) = 0; @@ -314,6 +314,8 @@ public: return new rtc::RefCountedObject(module); } +public: + bool preferX264 = false; }; std::unique_ptr<PlatformInterface> CreatePlatformInterface(); diff --git a/TMessagesProj/jni/voip/tgcalls/platform/android/AndroidInterface.cpp b/TMessagesProj/jni/voip/tgcalls/platform/android/AndroidInterface.cpp index 78a8e3b7f..7a968bd90 100644 --- a/TMessagesProj/jni/voip/tgcalls/platform/android/AndroidInterface.cpp +++ b/TMessagesProj/jni/voip/tgcalls/platform/android/AndroidInterface.cpp @@ -17,7 +17,7 @@ #include "sdk/android/native_api/video/video_source.h" #include "api/video_codecs/builtin_video_encoder_factory.h" #include "api/video_codecs/builtin_video_decoder_factory.h" -#include "api/video_track_source_proxy.h" +#include "api/video_track_source_proxy_factory.h" #include "AndroidContext.h" @@ -27,7 +27,7 @@ void AndroidInterface::configurePlatformAudio() { } -std::unique_ptr<webrtc::VideoEncoderFactory> AndroidInterface::makeVideoEncoderFactory(std::shared_ptr<PlatformContext> platformContext) { +std::unique_ptr<webrtc::VideoEncoderFactory> AndroidInterface::makeVideoEncoderFactory(std::shared_ptr<PlatformContext> platformContext, bool preferHardwareEncoding, bool isScreencast) { JNIEnv *env = 
webrtc::AttachCurrentThreadIfNeeded(); AndroidContext *context = (AndroidContext *) platformContext.get(); @@ -60,7 +60,7 @@ void AndroidInterface::adaptVideoSource(rtc::scoped_refptr AndroidInterface::makeVideoSource(rtc::Thread *signalingThread, rtc::Thread *workerThread, bool screencapture) { JNIEnv *env = webrtc::AttachCurrentThreadIfNeeded(); _source[screencapture ? 1 : 0] = webrtc::CreateJavaVideoSource(env, signalingThread, false, false); - return webrtc::VideoTrackSourceProxy::Create(signalingThread, workerThread, _source[screencapture ? 1 : 0]); + return webrtc::CreateVideoTrackSourceProxy(signalingThread, workerThread, _source[screencapture ? 1 : 0]); } bool AndroidInterface::supportsEncoding(const std::string &codecName, std::shared_ptr platformContext) { diff --git a/TMessagesProj/jni/voip/tgcalls/platform/android/AndroidInterface.h b/TMessagesProj/jni/voip/tgcalls/platform/android/AndroidInterface.h index 32d5fd818..d2b1820e1 100644 --- a/TMessagesProj/jni/voip/tgcalls/platform/android/AndroidInterface.h +++ b/TMessagesProj/jni/voip/tgcalls/platform/android/AndroidInterface.h @@ -10,7 +10,7 @@ namespace tgcalls { class AndroidInterface : public PlatformInterface { public: void configurePlatformAudio() override; - std::unique_ptr makeVideoEncoderFactory(std::shared_ptr platformContext) override; + std::unique_ptr makeVideoEncoderFactory(std::shared_ptr platformContext, bool preferHardwareEncoding = false, bool isScreencast = false) override; std::unique_ptr makeVideoDecoderFactory(std::shared_ptr platformContext) override; bool supportsEncoding(const std::string &codecName, std::shared_ptr platformContext) override; rtc::scoped_refptr makeVideoSource(rtc::Thread *signalingThread, rtc::Thread *workerThread, bool screencapture) override; diff --git a/TMessagesProj/jni/voip/tgcalls/platform/fake/FakeInterface.cpp b/TMessagesProj/jni/voip/tgcalls/platform/fake/FakeInterface.cpp index 761b6201c..16a004446 100644 --- a/TMessagesProj/jni/voip/tgcalls/platform/fake/FakeInterface.cpp +++ b/TMessagesProj/jni/voip/tgcalls/platform/fake/FakeInterface.cpp @@ -2,11 +2,11 @@ #include "api/video_codecs/builtin_video_encoder_factory.h" #include "api/video_codecs/builtin_video_decoder_factory.h" -#include "api/video_track_source_proxy.h" +//#include "api/video_track_source_proxy.h" namespace tgcalls { -std::unique_ptr FakeInterface::makeVideoEncoderFactory() { +std::unique_ptr FakeInterface::makeVideoEncoderFactory(bool preferHardwareEncoding, bool isScreencast) { return webrtc::CreateBuiltinVideoEncoderFactory(); } diff --git a/TMessagesProj/jni/voip/tgcalls/platform/fake/FakeInterface.h b/TMessagesProj/jni/voip/tgcalls/platform/fake/FakeInterface.h index c5f084a5c..6bfecde4c 100644 --- a/TMessagesProj/jni/voip/tgcalls/platform/fake/FakeInterface.h +++ b/TMessagesProj/jni/voip/tgcalls/platform/fake/FakeInterface.h @@ -7,7 +7,7 @@ namespace tgcalls { class FakeInterface : public PlatformInterface { public: - std::unique_ptr makeVideoEncoderFactory() override; + std::unique_ptr makeVideoEncoderFactory(bool preferHardwareEncoding, bool isScreencast) override; std::unique_ptr makeVideoDecoderFactory() override; bool supportsEncoding(const std::string &codecName) override; rtc::scoped_refptr makeVideoSource(rtc::Thread *signalingThread, diff --git a/TMessagesProj/jni/voip/tgcalls/reference/InstanceImplReference.cpp b/TMessagesProj/jni/voip/tgcalls/reference/InstanceImplReference.cpp deleted file mode 100644 index 7569c6ce5..000000000 --- 
a/TMessagesProj/jni/voip/tgcalls/reference/InstanceImplReference.cpp +++ /dev/null @@ -1,1035 +0,0 @@ -#include "InstanceImplReference.h" - -#include -#include "api/scoped_refptr.h" -#include "rtc_base/thread.h" -#include "rtc_base/logging.h" -#include "api/peer_connection_interface.h" -#include "api/task_queue/default_task_queue_factory.h" -#include "media/engine/webrtc_media_engine.h" -#include "api/audio_codecs/builtin_audio_encoder_factory.h" -#include "api/audio_codecs/builtin_audio_decoder_factory.h" -#include "api/rtc_event_log/rtc_event_log_factory.h" -#include "sdk/media_constraints.h" -#include "api/peer_connection_interface.h" -#include "api/video_track_source_proxy.h" -#include "system_wrappers/include/field_trial.h" -#include "api/stats/rtcstats_objects.h" - -#include "ThreadLocalObject.h" -#include "Manager.h" -#include "NetworkManager.h" -#include "VideoCaptureInterfaceImpl.h" -#include "platform/PlatformInterface.h" -#include "LogSinkImpl.h" -#include "StaticThreads.h" - -namespace tgcalls { -namespace { - -VideoCaptureInterfaceObject *GetVideoCaptureAssumingSameThread(VideoCaptureInterface *videoCapture) { - return videoCapture - ? static_cast(videoCapture)->object()->getSyncAssumingSameThread() - : nullptr; -} - -class PeerConnectionObserverImpl : public webrtc::PeerConnectionObserver { -private: - std::function _discoveredIceCandidate; - std::function _connectionStateChanged; - std::function transceiver)> _onTrack; - -public: - PeerConnectionObserverImpl( - std::function discoveredIceCandidate, - std::function connectionStateChanged, - std::function transceiver)> onTrack - ) : - _discoveredIceCandidate(discoveredIceCandidate), - _connectionStateChanged(connectionStateChanged), - _onTrack(onTrack) { - } - - virtual void OnSignalingChange(webrtc::PeerConnectionInterface::SignalingState new_state) { - bool isConnected = false; - if (new_state == webrtc::PeerConnectionInterface::SignalingState::kStable) { - isConnected = true; - } - _connectionStateChanged(isConnected); - } - - virtual void OnAddStream(rtc::scoped_refptr stream) { - } - - virtual void OnRemoveStream(rtc::scoped_refptr stream) { - } - - virtual void OnDataChannel(rtc::scoped_refptr data_channel) { - } - - virtual void OnRenegotiationNeeded() { - } - - virtual void OnIceConnectionChange(webrtc::PeerConnectionInterface::IceConnectionState new_state) { - } - - virtual void OnStandardizedIceConnectionChange(webrtc::PeerConnectionInterface::IceConnectionState new_state) { - } - - virtual void OnConnectionChange(webrtc::PeerConnectionInterface::PeerConnectionState new_state) { - } - - virtual void OnIceGatheringChange(webrtc::PeerConnectionInterface::IceGatheringState new_state) { - } - - virtual void OnIceCandidate(const webrtc::IceCandidateInterface* candidate) { - std::string sdp; - candidate->ToString(&sdp); - _discoveredIceCandidate(sdp, candidate->sdp_mline_index(), candidate->sdp_mid()); - } - - virtual void OnIceCandidateError(const std::string& host_candidate, const std::string& url, int error_code, const std::string& error_text) { - } - - virtual void OnIceCandidateError(const std::string& address, - int port, - const std::string& url, - int error_code, - const std::string& error_text) { - } - - virtual void OnIceCandidatesRemoved(const std::vector& candidates) { - } - - virtual void OnIceConnectionReceivingChange(bool receiving) { - } - - virtual void OnIceSelectedCandidatePairChanged(const cricket::CandidatePairChangeEvent& event) { - } - - virtual void OnAddTrack(rtc::scoped_refptr receiver, const 
std::vector>& streams) { - } - - virtual void OnTrack(rtc::scoped_refptr transceiver) { - _onTrack(transceiver); - } - - virtual void OnRemoveTrack(rtc::scoped_refptr receiver) { - } - - virtual void OnInterestingUsage(int usage_pattern) { - } -}; - -class RTCStatsCollectorCallbackImpl : public webrtc::RTCStatsCollectorCallback { -public: - RTCStatsCollectorCallbackImpl(std::function &)> completion) : - _completion(completion) { - } - - virtual void OnStatsDelivered(const rtc::scoped_refptr &report) override { - _completion(report); - } - -private: - std::function &)> _completion; -}; - -class CreateSessionDescriptionObserverImpl : public webrtc::CreateSessionDescriptionObserver { -private: - std::function _completion; - -public: - CreateSessionDescriptionObserverImpl(std::function completion) : - _completion(completion) { - } - - virtual void OnSuccess(webrtc::SessionDescriptionInterface* desc) override { - if (desc) { - std::string sdp; - desc->ToString(&sdp); - - _completion(sdp, desc->type()); - } - } - - virtual void OnFailure(webrtc::RTCError error) override { - } -}; - -class SetSessionDescriptionObserverImpl : public webrtc::SetSessionDescriptionObserver { -private: - std::function _completion; - -public: - SetSessionDescriptionObserverImpl(std::function completion) : - _completion(completion) { - } - - virtual void OnSuccess() override { - _completion(); - } - - virtual void OnFailure(webrtc::RTCError error) override { - } -}; - -struct StatsData { - int32_t packetsReceived = 0; - int32_t packetsLost = 0; -}; - -struct IceCandidateData { - std::string sdpMid; - int mid; - std::string sdp; - - IceCandidateData(std::string _sdpMid, int _mid, std::string _sdp) : - sdpMid(_sdpMid), - mid(_mid), - sdp(_sdp) { - } -}; - -} //namespace - -class InstanceImplReferenceInternal final : public std::enable_shared_from_this { -public: - InstanceImplReferenceInternal( - const Descriptor &descriptor - ) : - _encryptionKey(descriptor.encryptionKey), - _rtcServers(descriptor.rtcServers), - _enableP2P(descriptor.config.enableP2P), - _stateUpdated(descriptor.stateUpdated), - _signalBarsUpdated(descriptor.signalBarsUpdated), - _signalingDataEmitted(descriptor.signalingDataEmitted), - _remoteMediaStateUpdated(descriptor.remoteMediaStateUpdated), - _remoteBatteryLevelIsLowUpdated(descriptor.remoteBatteryLevelIsLowUpdated), - _remotePrefferedAspectRatioUpdated(descriptor.remotePrefferedAspectRatioUpdated), - _videoCapture(descriptor.videoCapture), - _state(State::Reconnecting), - _videoState(_videoCapture ? 
VideoState::Active : VideoState::Inactive), - _platformContext(descriptor.platformContext) { - assert(StaticThreads::getMediaThread()->IsCurrent()); - - rtc::LogMessage::LogToDebug(rtc::LS_INFO); - rtc::LogMessage::SetLogToStderr(false); - - /*webrtc::field_trial::InitFieldTrialsFromString( - "WebRTC-Audio-SendSideBwe/Enabled/" - "WebRTC-Audio-Allocation/min:6kbps,max:32kbps/" - "WebRTC-Audio-OpusMinPacketLossRate/Enabled-1/" - "WebRTC-FlexFEC-03/Enabled/" - "WebRTC-FlexFEC-03-Advertised/Enabled/" - "WebRTC-Audio-BitrateAdaptation/Enabled/WebRTC-Audio-FecAdaptation/Enabled/" - );*/ - - _streamIds.push_back("stream"); - } - - ~InstanceImplReferenceInternal() { - assert(StaticThreads::getMediaThread()->IsCurrent()); - - _peerConnection->Close(); - } - - void start() { - const auto weak = std::weak_ptr(shared_from_this()); - - PlatformInterface::SharedInstance()->configurePlatformAudio(); - - _signalingConnection.reset(new EncryptedConnection( - EncryptedConnection::Type::Signaling, - _encryptionKey, - [weak](int delayMs, int cause) { - if (delayMs == 0) { - StaticThreads::getMediaThread()->PostTask(RTC_FROM_HERE, [weak, cause](){ - auto strong = weak.lock(); - if (!strong) { - return; - } - strong->sendPendingServiceMessages(cause); - }); - } else { - StaticThreads::getMediaThread()->PostDelayedTask(RTC_FROM_HERE, [weak, cause]() { - auto strong = weak.lock(); - if (!strong) { - return; - } - strong->sendPendingServiceMessages(cause); - }, delayMs); - } - } - )); - - webrtc::PeerConnectionFactoryDependencies dependencies; - dependencies.network_thread = StaticThreads::getNetworkThread(); - dependencies.worker_thread = StaticThreads::getWorkerThread(); - dependencies.signaling_thread = StaticThreads::getMediaThread(); - dependencies.task_queue_factory = webrtc::CreateDefaultTaskQueueFactory(); - - cricket::MediaEngineDependencies mediaDeps; - mediaDeps.task_queue_factory = dependencies.task_queue_factory.get(); - mediaDeps.audio_encoder_factory = webrtc::CreateBuiltinAudioEncoderFactory(); - mediaDeps.audio_decoder_factory = webrtc::CreateBuiltinAudioDecoderFactory(); - mediaDeps.video_encoder_factory = PlatformInterface::SharedInstance()->makeVideoEncoderFactory(_platformContext); - mediaDeps.video_decoder_factory = PlatformInterface::SharedInstance()->makeVideoDecoderFactory(_platformContext); - - webrtc::AudioProcessing *apm = webrtc::AudioProcessingBuilder().Create(); - webrtc::AudioProcessing::Config audioConfig; - webrtc::AudioProcessing::Config::NoiseSuppression noiseSuppression; - noiseSuppression.enabled = true; - noiseSuppression.level = webrtc::AudioProcessing::Config::NoiseSuppression::kHigh; - audioConfig.noise_suppression = noiseSuppression; - - audioConfig.high_pass_filter.enabled = true; - - apm->ApplyConfig(audioConfig); - - mediaDeps.audio_processing = apm; - - dependencies.media_engine = cricket::CreateMediaEngine(std::move(mediaDeps)); - dependencies.call_factory = webrtc::CreateCallFactory(); - dependencies.event_log_factory = - std::make_unique(dependencies.task_queue_factory.get()); - dependencies.network_controller_factory = nullptr; - //dependencies.media_transport_factory = nullptr; - - _nativeFactory = webrtc::CreateModularPeerConnectionFactory(std::move(dependencies)); - - webrtc::PeerConnectionInterface::RTCConfiguration config; - config.sdp_semantics = webrtc::SdpSemantics::kUnifiedPlan; - //config.continual_gathering_policy = webrtc::PeerConnectionInterface::ContinualGatheringPolicy::GATHER_CONTINUALLY; - /*config.audio_jitter_buffer_fast_accelerate = true; - 
config.prioritize_most_likely_ice_candidate_pairs = true; - config.presume_writable_when_fully_relayed = true; - config.audio_jitter_buffer_enable_rtx_handling = true;*/ - - for (auto &server : _rtcServers) { - if (server.isTurn) { - webrtc::PeerConnectionInterface::IceServer iceServer; - std::ostringstream uri; - uri << "turn:"; - uri << server.host; - uri << ":"; - uri << server.port; - iceServer.uri = uri.str(); - iceServer.username = server.login; - iceServer.password = server.password; - config.servers.push_back(iceServer); - } else { - webrtc::PeerConnectionInterface::IceServer iceServer; - std::ostringstream uri; - uri << "stun:"; - uri << server.host; - uri << ":"; - uri << server.port; - iceServer.uri = uri.str(); - config.servers.push_back(iceServer); - } - } - - if (true || !_enableP2P) { - config.type = webrtc::PeerConnectionInterface::kRelay; - } - - _observer.reset(new PeerConnectionObserverImpl( - [weak](std::string sdp, int mid, std::string sdpMid) { - StaticThreads::getMediaThread()->PostTask(RTC_FROM_HERE, [weak, sdp, mid, sdpMid](){ - auto strong = weak.lock(); - if (strong) { - strong->emitIceCandidate(sdp, mid, sdpMid); - } - }); - }, - [weak](bool isConnected) { - StaticThreads::getMediaThread()->PostTask(RTC_FROM_HERE, [weak, isConnected](){ - auto strong = weak.lock(); - if (strong) { - strong->updateIsConnected(isConnected); - } - }); - }, - [weak](rtc::scoped_refptr transceiver) { - StaticThreads::getMediaThread()->PostTask(RTC_FROM_HERE, [weak, transceiver](){ - auto strong = weak.lock(); - if (!strong) { - return; - } - strong->onTrack(transceiver); - }); - } - )); - _peerConnection = _nativeFactory->CreatePeerConnection(config, nullptr, nullptr, _observer.get()); - assert(_peerConnection != nullptr); - - cricket::AudioOptions options; - rtc::scoped_refptr audioSource = _nativeFactory->CreateAudioSource(options); - _localAudioTrack = _nativeFactory->CreateAudioTrack("audio0", audioSource); - _peerConnection->AddTrack(_localAudioTrack, _streamIds); - - if (_videoCapture) { - beginSendingVideo(); - } - - if (_encryptionKey.isOutgoing) { - emitOffer(); - } - - beginStatsTimer(1000); - } - - void setMuteMicrophone(bool muteMicrophone) { - _localAudioTrack->set_enabled(!muteMicrophone); - changeAudioState(muteMicrophone ? 
AudioState::Muted : AudioState::Active); - } - - void setIncomingVideoOutput(std::shared_ptr> sink) { - if (!sink) { - return; - } - _currentSink = sink; - if (_remoteVideoTrack) { - _remoteVideoTrack->AddOrUpdateSink(_currentSink.get(), rtc::VideoSinkWants()); - } - } - - void setVideoCapture(std::shared_ptr videoCapture) { - assert(videoCapture != nullptr); - - _videoCapture = videoCapture; - - if (_preferredAspectRatio > 0.01f) { - VideoCaptureInterfaceObject *videoCaptureImpl = GetVideoCaptureAssumingSameThread(_videoCapture.get()); - videoCaptureImpl->setPreferredAspectRatio(_preferredAspectRatio); - } - beginSendingVideo(); - } - - void sendVideoDeviceUpdated() { - } - - void setRequestedVideoAspect(float aspect) { - } - - void receiveSignalingData(const std::vector &data) { - if (true) { - rtc::CopyOnWriteBuffer packet; - packet.SetData(data.data(), data.size()); - processSignalingData(packet); - return; - } - - if (const auto packet = _signalingConnection->handleIncomingPacket((const char *)data.data(), data.size())) { - const auto mainMessage = &packet->main.message.data; - if (const auto signalingData = absl::get_if(mainMessage)) { - processSignalingData(signalingData->data); - } - for (auto &it : packet->additional) { - const auto additionalMessage = &it.message.data; - if (const auto signalingData = absl::get_if(additionalMessage)) { - processSignalingData(signalingData->data); - } - } - } - } - - void processSignalingData(const rtc::CopyOnWriteBuffer &decryptedPacket) { - rtc::ByteBufferReader reader((const char *)decryptedPacket.data(), decryptedPacket.size()); - uint8_t command = 0; - if (!reader.ReadUInt8(&command)) { - return; - } - if (command == 1) { - uint32_t sdpLength = 0; - if (!reader.ReadUInt32(&sdpLength)) { - return; - } - std::string sdp; - if (!reader.ReadString(&sdp, sdpLength)) { - return; - } - uint32_t mid = 0; - if (!reader.ReadUInt32(&mid)) { - return; - } - uint32_t sdpMidLength = 0; - if (!reader.ReadUInt32(&sdpMidLength)) { - return; - } - std::string sdpMid; - if (!reader.ReadString(&sdpMid, sdpMidLength)) { - return; - } - _pendingRemoteIceCandidates.push_back(std::make_shared(sdpMid, mid, sdp)); - processRemoteIceCandidatesIfReady(); - } else if (command == 2) { - uint32_t sdpLength = 0; - if (!reader.ReadUInt32(&sdpLength)) { - return; - } - std::string sdp; - if (!reader.ReadString(&sdp, sdpLength)) { - return; - } - uint32_t typeLength = 0; - if (!reader.ReadUInt32(&typeLength)) { - return; - } - std::string type; - if (!reader.ReadString(&type, typeLength)) { - return; - } - webrtc::SdpParseError error; - webrtc::SessionDescriptionInterface *sessionDescription = webrtc::CreateSessionDescription(type, sdp, &error); - if (sessionDescription != nullptr) { - const auto weak = std::weak_ptr(shared_from_this()); - rtc::scoped_refptr observer(new rtc::RefCountedObject([weak]() { - StaticThreads::getMediaThread()->PostTask(RTC_FROM_HERE, [weak](){ - auto strong = weak.lock(); - if (!strong) { - return; - } - strong->emitAnswer(); - }); - })); - _peerConnection->SetRemoteDescription(observer, sessionDescription); - _didSetRemoteDescription = true; - processRemoteIceCandidatesIfReady(); - } - } else if (command == 3) { - uint32_t sdpLength = 0; - if (!reader.ReadUInt32(&sdpLength)) { - return; - } - std::string sdp; - if (!reader.ReadString(&sdp, sdpLength)) { - return; - } - uint32_t typeLength = 0; - if (!reader.ReadUInt32(&typeLength)) { - return; - } - std::string type; - if (!reader.ReadString(&type, typeLength)) { - return; - } - 
webrtc::SdpParseError error; - webrtc::SessionDescriptionInterface *sessionDescription = webrtc::CreateSessionDescription(type, sdp, &error); - if (sessionDescription != nullptr) { - rtc::scoped_refptr observer(new rtc::RefCountedObject([]() { - })); - _peerConnection->SetRemoteDescription(observer, sessionDescription); - _didSetRemoteDescription = true; - processRemoteIceCandidatesIfReady(); - } - } else if (command == 4) { - uint8_t value = 0; - if (!reader.ReadUInt8(&value)) { - return; - } - const auto audio = AudioState(value & 0x01); - const auto video = VideoState((value >> 1) & 0x03); - if (video == VideoState(0x03)) { - return; - } - _remoteMediaStateUpdated(audio, video); - } else if (command == 6) { - uint32_t value = 0; - if (!reader.ReadUInt32(&value)) { - return; - } - _preferredAspectRatio = ((float)value) / 1000.0f; - if (_videoCapture) { - VideoCaptureInterfaceObject *videoCaptureImpl = GetVideoCaptureAssumingSameThread(_videoCapture.get()); - videoCaptureImpl->setPreferredAspectRatio(_preferredAspectRatio); - } - _remotePrefferedAspectRatioUpdated(_preferredAspectRatio); - } - } - -private: - void beginStatsTimer(int timeoutMs) { - const auto weak = std::weak_ptr(shared_from_this()); - StaticThreads::getMediaThread()->PostDelayedTask(RTC_FROM_HERE, [weak]() { - StaticThreads::getMediaThread()->PostTask(RTC_FROM_HERE, [weak](){ - auto strong = weak.lock(); - if (!strong) { - return; - } - strong->collectStats(); - }); - }, timeoutMs); - } - - void collectStats() { - const auto weak = std::weak_ptr(shared_from_this()); - - rtc::scoped_refptr observer(new rtc::RefCountedObject([weak](const rtc::scoped_refptr &stats) { - StaticThreads::getMediaThread()->PostTask(RTC_FROM_HERE, [weak, stats](){ - auto strong = weak.lock(); - if (!strong) { - return; - } - strong->reportStats(stats); - strong->beginStatsTimer(5000); - }); - })); - _peerConnection->GetStats(observer); - } - - void reportStats(const rtc::scoped_refptr &stats) { - int32_t inboundPacketsReceived = 0; - int32_t inboundPacketsLost = 0; - - for (auto it = stats->begin(); it != stats->end(); it++) { - if (it->type() == std::string("inbound-rtp")) { - for (auto &member : it->Members()) { - if (member->name() == std::string("packetsLost")) { - inboundPacketsLost = *(member->cast_to>()); - } else if (member->name() == std::string("packetsReceived")) { - inboundPacketsReceived = *(member->cast_to>()); - } - } - } - } - - int32_t deltaPacketsReceived = inboundPacketsReceived - _statsData.packetsReceived; - int32_t deltaPacketsLost = inboundPacketsLost - _statsData.packetsLost; - - _statsData.packetsReceived = inboundPacketsReceived; - _statsData.packetsLost = inboundPacketsLost; - - float signalBarsNorm = 5.0f; - - if (deltaPacketsReceived > 0) { - float lossRate = ((float)deltaPacketsLost) / ((float)deltaPacketsReceived); - float adjustedLossRate = lossRate / 0.1f; - adjustedLossRate = fmaxf(0.0f, adjustedLossRate); - adjustedLossRate = fminf(1.0f, adjustedLossRate); - float adjustedQuality = 1.0f - adjustedLossRate; - _signalBarsUpdated((int)(adjustedQuality * signalBarsNorm)); - } else { - _signalBarsUpdated((int)(1.0f * signalBarsNorm)); - } - } - - void sendPendingServiceMessages(int cause) { - if (const auto prepared = _signalingConnection->prepareForSendingService(cause)) { - _signalingDataEmitted(prepared->bytes); - } - } - - void emitSignaling(const rtc::ByteBufferWriter &buffer) { - rtc::CopyOnWriteBuffer packet; - packet.SetData(buffer.Data(), buffer.Length()); - - if (true) { - std::vector result; - 
result.resize(buffer.Length()); - memcpy(result.data(), buffer.Data(), buffer.Length()); - _signalingDataEmitted(result); - return; - } - - if (const auto prepared = _signalingConnection->prepareForSending(Message{ UnstructuredDataMessage{ packet } })) { - _signalingDataEmitted(prepared->bytes); - } - } - - void emitIceCandidate(std::string sdp, int mid, std::string sdpMid) { - RTC_LOG(LS_INFO) << "emitIceCandidate " << sdp << ", " << mid << ", " << sdpMid; - - rtc::ByteBufferWriter writer; - writer.WriteUInt8(1); - writer.WriteUInt32((uint32_t)sdp.size()); - writer.WriteString(sdp); - writer.WriteUInt32((uint32_t)mid); - writer.WriteUInt32((uint32_t)sdpMid.size()); - writer.WriteString(sdpMid); - - emitSignaling(writer); - } - - void emitOffer() { - const auto weak = std::weak_ptr(shared_from_this()); - - webrtc::PeerConnectionInterface::RTCOfferAnswerOptions options; - options.offer_to_receive_audio = 1; - if (_videoCapture) { - options.offer_to_receive_video = 1; - } - - rtc::scoped_refptr observer(new rtc::RefCountedObject([weak](std::string sdp, std::string type) { - StaticThreads::getMediaThread()->PostTask(RTC_FROM_HERE, [weak, sdp, type](){ - auto strong = weak.lock(); - if (!strong) { - return; - } - - webrtc::SdpParseError error; - webrtc::SessionDescriptionInterface *sessionDescription = webrtc::CreateSessionDescription(type, sdp, &error); - if (sessionDescription != nullptr) { - rtc::scoped_refptr observer(new rtc::RefCountedObject([weak, sdp, type]() { - auto strong = weak.lock(); - if (!strong) { - return; - } - strong->emitOfferData(sdp, type); - })); - strong->_peerConnection->SetLocalDescription(observer, sessionDescription); - } - }); - })); - _peerConnection->CreateOffer(observer, options); - } - - void emitOfferData(std::string sdp, std::string type) { - rtc::ByteBufferWriter writer; - writer.WriteUInt8(2); - writer.WriteUInt32((uint32_t)sdp.size()); - writer.WriteString(sdp); - writer.WriteUInt32((uint32_t)type.size()); - writer.WriteString(type); - - emitSignaling(writer); - } - - void emitAnswerData(std::string sdp, std::string type) { - rtc::ByteBufferWriter writer; - writer.WriteUInt8(3); - writer.WriteUInt32((uint32_t)sdp.size()); - writer.WriteString(sdp); - writer.WriteUInt32((uint32_t)type.size()); - writer.WriteString(type); - - emitSignaling(writer); - } - - void emitAnswer() { - const auto weak = std::weak_ptr(shared_from_this()); - - webrtc::PeerConnectionInterface::RTCOfferAnswerOptions options; - options.offer_to_receive_audio = 1; - if (_videoCapture) { - options.offer_to_receive_video = 1; - } - - rtc::scoped_refptr observer(new rtc::RefCountedObject([weak](std::string sdp, std::string type) { - StaticThreads::getMediaThread()->PostTask(RTC_FROM_HERE, [weak, sdp, type](){ - auto strong = weak.lock(); - if (!strong) { - return; - } - - webrtc::SdpParseError error; - webrtc::SessionDescriptionInterface *sessionDescription = webrtc::CreateSessionDescription(type, sdp, &error); - if (sessionDescription != nullptr) { - rtc::scoped_refptr observer(new rtc::RefCountedObject([weak, sdp, type]() { - auto strong = weak.lock(); - if (!strong) { - return; - } - strong->emitAnswerData(sdp, type); - })); - strong->_peerConnection->SetLocalDescription(observer, sessionDescription); - } - }); - })); - _peerConnection->CreateAnswer(observer, options); - - } - - void changeVideoState(VideoState state) { - if (_videoState != state) { - _videoState = state; - emitMediaState(); - } - } - - void changeAudioState(AudioState state) { - if (_audioState != state) { - _audioState 
= state; - emitMediaState(); - } - } - - void emitMediaState() { - rtc::ByteBufferWriter writer; - writer.WriteUInt8(4); - writer.WriteUInt8((uint8_t(_videoState) << 1) | uint8_t(_audioState)); - - emitSignaling(writer); - } - - void emitRequestVideo() { - rtc::ByteBufferWriter writer; - writer.WriteUInt8(5); - - emitSignaling(writer); - } - - void emitVideoParameters() { - if (_localPreferredVideoAspectRatio > 0.01f) { - rtc::ByteBufferWriter writer; - writer.WriteUInt8(6); - writer.WriteUInt32((uint32_t)(_localPreferredVideoAspectRatio * 1000.0f)); - - emitSignaling(writer); - } - } - - void processRemoteIceCandidatesIfReady() { - if (_pendingRemoteIceCandidates.size() == 0 || !_didSetRemoteDescription) { - return; - } - - for (auto &it : _pendingRemoteIceCandidates) { - webrtc::SdpParseError error; - webrtc::IceCandidateInterface *iceCandidate = webrtc::CreateIceCandidate(it->sdpMid, it->mid, it->sdp, &error); - if (iceCandidate != nullptr) { - std::unique_ptr nativeCandidate = std::unique_ptr(iceCandidate); - _peerConnection->AddIceCandidate(std::move(nativeCandidate), [](auto error) { - }); - } - } - _pendingRemoteIceCandidates.clear(); - } - - void updateIsConnected(bool isConnected) { - if (isConnected) { - _state = State::Established; - if (!_didConnectOnce) { - _didConnectOnce = true; - } - } else { - _state = State::Reconnecting; - } - _stateUpdated(_state); - } - - void onTrack(rtc::scoped_refptr transceiver) { - if (!_remoteVideoTrack) { - if (transceiver->media_type() == cricket::MediaType::MEDIA_TYPE_VIDEO) { - _remoteVideoTrack = static_cast(transceiver->receiver()->track().get()); - } - if (_remoteVideoTrack && _currentSink) { - _remoteVideoTrack->AddOrUpdateSink(_currentSink.get(), rtc::VideoSinkWants()); - } - } - } - - void beginSendingVideo() { - if (!_videoCapture) { - return; - } - - VideoCaptureInterfaceObject *videoCaptureImpl = GetVideoCaptureAssumingSameThread(_videoCapture.get()); - - const auto weak = std::weak_ptr(shared_from_this()); - - videoCaptureImpl->setStateUpdated([weak](VideoState state) { - StaticThreads::getMediaThread()->PostTask(RTC_FROM_HERE, [weak, state](){ - auto strong = weak.lock(); - if (strong) { - strong->changeVideoState(state); - } - }); - }); - - _localVideoTrack = _nativeFactory->CreateVideoTrack("video0", videoCaptureImpl->source()); - _peerConnection->AddTrack(_localVideoTrack, _streamIds); - for (auto &it : _peerConnection->GetTransceivers()) { - if (it->media_type() == cricket::MediaType::MEDIA_TYPE_VIDEO) { - auto capabilities = _nativeFactory->GetRtpSenderCapabilities( - cricket::MediaType::MEDIA_TYPE_VIDEO); - - std::vector codecs; - for (auto &codec : capabilities.codecs) { -#ifndef WEBRTC_DISABLE_H265 - if (codec.name == cricket::kH265CodecName) { - codecs.insert(codecs.begin(), codec); - } else { - codecs.push_back(codec); - } -#else - codecs.push_back(codec); -#endif - } - it->SetCodecPreferences(codecs); - - break; - } - } - - if (_didConnectOnce && _encryptionKey.isOutgoing) { - emitOffer(); - } - - emitVideoParameters(); - } - -private: - EncryptionKey _encryptionKey; - std::vector _rtcServers; - bool _enableP2P; - std::function _stateUpdated; - std::function _signalBarsUpdated; - std::function &)> _signalingDataEmitted; - std::function _remoteMediaStateUpdated; - std::function _remoteBatteryLevelIsLowUpdated; - std::function _remotePrefferedAspectRatioUpdated; - std::shared_ptr _videoCapture; - std::unique_ptr _signalingConnection; - float _localPreferredVideoAspectRatio = 0.0f; - float _preferredAspectRatio = 0.0f; - - 
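// Illustrative sketch, not part of the patch: the legacy reference signaling
// removed above frames every message as a one-byte tag (1 = ICE candidate,
// 2 = offer, 3 = answer, 4 = media state, 5 = request video, 6 = video
// parameters) followed by 32-bit integers and length-prefixed strings.
// rtc::ByteBufferWriter writes multi-byte integers in network byte order by
// default; this standalone mock mirrors that layout so the framing can be
// inspected without pulling in WebRTC.
#include <cstdint>
#include <string>
#include <vector>

namespace signaling_sketch {

inline void appendU32(std::vector<uint8_t> &out, uint32_t v) {
    out.push_back(uint8_t(v >> 24));
    out.push_back(uint8_t(v >> 16));
    out.push_back(uint8_t(v >> 8));
    out.push_back(uint8_t(v));
}

inline void appendString(std::vector<uint8_t> &out, const std::string &s) {
    appendU32(out, uint32_t(s.size()));
    out.insert(out.end(), s.begin(), s.end());
}

// Mirrors emitIceCandidate(): tag 1, sdp, m-line index, sdpMid.
inline std::vector<uint8_t> encodeIceCandidate(const std::string &sdp,
                                               uint32_t mid,
                                               const std::string &sdpMid) {
    std::vector<uint8_t> out;
    out.push_back(1);
    appendString(out, sdp);
    appendU32(out, mid);
    appendString(out, sdpMid);
    return out;
}

// Mirrors emitMediaState(): tag 4, then video and audio state packed into one
// byte as (videoState << 1) | audioState.
inline std::vector<uint8_t> encodeMediaState(uint8_t videoState, uint8_t audioState) {
    return {4, uint8_t((videoState << 1) | audioState)};
}

}  // namespace signaling_sketch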
State _state = State::WaitInit; - AudioState _audioState = AudioState::Active; - VideoState _videoState = VideoState::Inactive; - bool _didConnectOnce = false; - - std::vector _streamIds; - - StatsData _statsData; - - rtc::scoped_refptr _nativeFactory; - std::unique_ptr _observer; - rtc::scoped_refptr _peerConnection; - std::unique_ptr _nativeConstraints; - rtc::scoped_refptr _localAudioTrack; - rtc::scoped_refptr _localVideoTrack; - rtc::scoped_refptr _remoteVideoTrack; - - std::shared_ptr> _currentSink; - - bool _didSetRemoteDescription = false; - std::vector> _pendingRemoteIceCandidates; - - std::shared_ptr _platformContext; -}; - -InstanceImplReference::InstanceImplReference(Descriptor &&descriptor) : - logSink_(std::make_unique(descriptor.config.logPath)) { - rtc::LogMessage::AddLogToStream(logSink_.get(), rtc::LS_INFO); - - internal_.reset(new ThreadLocalObject(StaticThreads::getMediaThread(), [descriptor = std::move(descriptor)]() { - return new InstanceImplReferenceInternal( - descriptor - ); - })); - internal_->perform(RTC_FROM_HERE, [](InstanceImplReferenceInternal *internal){ - internal->start(); - }); -} - -InstanceImplReference::~InstanceImplReference() { - rtc::LogMessage::RemoveLogToStream(logSink_.get()); -} - -void InstanceImplReference::setNetworkType(NetworkType networkType) { -} - -void InstanceImplReference::setMuteMicrophone(bool muteMicrophone) { - internal_->perform(RTC_FROM_HERE, [muteMicrophone = muteMicrophone](InstanceImplReferenceInternal *internal) { - internal->setMuteMicrophone(muteMicrophone); - }); -} - -void InstanceImplReference::receiveSignalingData(const std::vector &data) { - internal_->perform(RTC_FROM_HERE, [data](InstanceImplReferenceInternal *internal) { - internal->receiveSignalingData(data); - }); -} - -void InstanceImplReference::setVideoCapture(std::shared_ptr videoCapture) { - internal_->perform(RTC_FROM_HERE, [videoCapture](InstanceImplReferenceInternal *internal) { - internal->setVideoCapture(videoCapture); - }); -} - -void InstanceImplReference::setRequestedVideoAspect(float aspect) { - internal_->perform(RTC_FROM_HERE, [aspect](InstanceImplReferenceInternal *internal) { - internal->setRequestedVideoAspect(aspect); - }); -} - -void InstanceImplReference::setIncomingVideoOutput(std::shared_ptr> sink) { - internal_->perform(RTC_FROM_HERE, [sink](InstanceImplReferenceInternal *internal) { - internal->setIncomingVideoOutput(sink); - }); -} - -void InstanceImplReference::setAudioOutputGainControlEnabled(bool enabled) { -} - -void InstanceImplReference::setEchoCancellationStrength(int strength) { -} - -void InstanceImplReference::setAudioInputDevice(std::string id) { -} - -void InstanceImplReference::setAudioOutputDevice(std::string id) { -} - -void InstanceImplReference::setInputVolume(float level) { -} - -void InstanceImplReference::setOutputVolume(float level) { -} - -void InstanceImplReference::setAudioOutputDuckingEnabled(bool enabled) { -} - -void InstanceImplReference::setIsLowBatteryLevel(bool isLowBatteryLevel) { -} - -int InstanceImplReference::GetConnectionMaxLayer() { - return 92; -} - -std::vector InstanceImplReference::GetVersions() { - std::vector result; - result.push_back("2.8.8"); - return result; -} - -std::string InstanceImplReference::getLastError() { - return "ERROR_UNKNOWN"; -} - -std::string InstanceImplReference::getDebugInfo() { - return ""; -} - -int64_t InstanceImplReference::getPreferredRelayId() { - return 0; -} - -TrafficStats InstanceImplReference::getTrafficStats() { - auto result = TrafficStats(); - return 
result; -} - -PersistentState InstanceImplReference::getPersistentState() { - return PersistentState(); -} - -void InstanceImplReference::stop(std::function completion) { - auto result = FinalState(); - - result.persistentState = getPersistentState(); - result.debugLog = logSink_->result(); - result.trafficStats = getTrafficStats(); - result.isRatingSuggested = false; - - completion(result); -} - -template <> -bool Register() { - return Meta::RegisterOne(); -} - -} // namespace tgcalls diff --git a/TMessagesProj/jni/voip/tgcalls/reference/InstanceImplReference.h b/TMessagesProj/jni/voip/tgcalls/reference/InstanceImplReference.h deleted file mode 100644 index e4808d92a..000000000 --- a/TMessagesProj/jni/voip/tgcalls/reference/InstanceImplReference.h +++ /dev/null @@ -1,53 +0,0 @@ -#ifndef TGCALLS_INSTANCE_IMPL_REFERENCE_H -#define TGCALLS_INSTANCE_IMPL_REFERENCE_H - -#include "Instance.h" -#include "ThreadLocalObject.h" - -namespace tgcalls { - -class LogSinkImpl; -class InstanceImplReferenceInternal; - -class InstanceImplReference : public Instance { -public: - explicit InstanceImplReference(Descriptor &&descriptor); - ~InstanceImplReference(); - - void receiveSignalingData(const std::vector &data) override; - void setNetworkType(NetworkType networkType) override; - void setMuteMicrophone(bool muteMicrophone) override; - void setVideoCapture(std::shared_ptr videoCapture) override; - void sendVideoDeviceUpdated() override { - } - void setRequestedVideoAspect(float aspect) override; - bool supportsVideo() override { - return true; - } - void setIncomingVideoOutput(std::shared_ptr> sink) override; - void setAudioOutputGainControlEnabled(bool enabled) override; - void setEchoCancellationStrength(int strength) override; - void setAudioInputDevice(std::string id) override; - void setAudioOutputDevice(std::string id) override; - void setInputVolume(float level) override; - void setOutputVolume(float level) override; - void setAudioOutputDuckingEnabled(bool enabled) override; - void setIsLowBatteryLevel(bool isLowBatteryLevel) override; - static int GetConnectionMaxLayer(); - static std::vector GetVersions(); - std::string getLastError() override; - std::string getDebugInfo() override; - int64_t getPreferredRelayId() override; - TrafficStats getTrafficStats() override; - PersistentState getPersistentState() override; - void stop(std::function completion) override; - -private: - std::unique_ptr logSink_; - std::unique_ptr> internal_; - -}; - -} // namespace tgcalls - -#endif diff --git a/TMessagesProj/jni/voip/tgcalls/v2/InstanceV2Impl.cpp b/TMessagesProj/jni/voip/tgcalls/v2/InstanceV2Impl.cpp index b1ccd8b95..51fd9e182 100644 --- a/TMessagesProj/jni/voip/tgcalls/v2/InstanceV2Impl.cpp +++ b/TMessagesProj/jni/voip/tgcalls/v2/InstanceV2Impl.cpp @@ -21,20 +21,17 @@ #include "system_wrappers/include/field_trial.h" #include "api/video/builtin_video_bitrate_allocator_factory.h" #include "call/call.h" -#include "modules/rtp_rtcp/source/rtp_utility.h" #include "api/call/audio_sink.h" #include "modules/audio_processing/audio_buffer.h" #include "absl/strings/match.h" -#include "modules/audio_processing/agc2/vad_with_level.h" #include "pc/channel_manager.h" -#include "media/base/rtp_data_engine.h" #include "audio/audio_state.h" #include "modules/audio_coding/neteq/default_neteq_factory.h" #include "modules/audio_coding/include/audio_coding_module.h" #include "api/candidate.h" #include "api/jsep_ice_candidate.h" -#include "media/base/h264_profile_level_id.h" #include "pc/used_ids.h" +#include 
"media/base/sdp_video_format_utils.h" #include "AudioFrame.h" #include "ThreadLocalObject.h" @@ -46,20 +43,15 @@ #include "CodecSelectHelper.h" #include "AudioDeviceHelper.h" #include "SignalingEncryption.h" - +#ifdef WEBRTC_IOS +#include "platform/darwin/iOS/tgcalls_audio_device_module_ios.h" +#endif #include #include namespace tgcalls { namespace { -static int stringToInt(std::string const &string) { - std::stringstream stringStream(string); - int value = 0; - stringStream >> value; - return value; -} - static std::string intToString(int value) { std::ostringstream stringStream; stringStream << value; @@ -181,7 +173,7 @@ static void NegotiateCodecs(const std::vector& local_codecs, negotiated.SetParam(cricket::kCodecParamAssociatedPayloadType, apt_it->second); } if (absl::EqualsIgnoreCase(ours.name, cricket::kH264CodecName)) { - webrtc::H264::GenerateProfileLevelIdForAnswer( + webrtc::H264GenerateProfileLevelIdForAnswer( ours.params, theirs.params, &negotiated.params); } negotiated.id = theirs.id; @@ -289,18 +281,29 @@ static std::vector generateAvailableVideoFormats(std::vecto return {}; } - constexpr int kFirstDynamicPayloadType = 120; + constexpr int kFirstDynamicPayloadType = 100; constexpr int kLastDynamicPayloadType = 127; int payload_type = kFirstDynamicPayloadType; std::vector result; - bool codecSelected = false; + //bool codecSelected = false; for (const auto &format : formats) { - if (codecSelected) { + /*if (codecSelected) { break; + }*/ + + bool alreadyAdded = false; + for (const auto &it : result) { + if (it.videoCodec.name == format.name) { + alreadyAdded = true; + break; + } + } + if (alreadyAdded) { + continue; } OutgoingVideoFormat resultFormat; @@ -309,12 +312,8 @@ static std::vector generateAvailableVideoFormats(std::vecto codec.id = payload_type; addDefaultFeedbackParams(&codec); - if (!absl::EqualsIgnoreCase(codec.name, cricket::kVp8CodecName)) { - continue; - } - resultFormat.videoCodec = codec; - codecSelected = true; + //codecSelected = true; // Increment payload type. ++payload_type; @@ -434,17 +433,20 @@ struct NegotiatedMediaContent { static bool FindByUri(const cricket::RtpHeaderExtensions& extensions, const webrtc::RtpExtension& ext_to_match, webrtc::RtpExtension* found_extension) { - // We assume that all URIs are given in a canonical format. - const webrtc::RtpExtension* found = - webrtc::RtpExtension::FindHeaderExtensionByUri(extensions, - ext_to_match.uri); - if (!found) { - return false; - } - if (found_extension) { - *found_extension = *found; - } - return true; + // We assume that all URIs are given in a canonical format. 
+ const webrtc::RtpExtension* found = + webrtc::RtpExtension::FindHeaderExtensionByUri( + extensions, + ext_to_match.uri, + webrtc::RtpExtension::Filter::kPreferEncryptedExtension + ); + if (!found) { + return false; + } + if (found_extension) { + *found_extension = *found; + } + return true; } template @@ -530,6 +532,7 @@ public: NegotiatedMediaContent const &mediaContent, std::shared_ptr threads ) : + _threads(threads), _ssrc(mediaContent.ssrc), _call(call), _channelManager(channelManager), @@ -584,7 +587,7 @@ public: outgoingAudioDescription->set_rtcp_reduced_size(true); outgoingAudioDescription->set_direction(webrtc::RtpTransceiverDirection::kSendOnly); outgoingAudioDescription->set_codecs(codecs); - outgoingAudioDescription->set_bandwidth(1032000); + outgoingAudioDescription->set_bandwidth(-1); outgoingAudioDescription->AddStream(cricket::StreamParams::CreateLegacy(_ssrc)); auto incomingAudioDescription = std::make_unique(); @@ -595,21 +598,22 @@ public: incomingAudioDescription->set_rtcp_reduced_size(true); incomingAudioDescription->set_direction(webrtc::RtpTransceiverDirection::kRecvOnly); incomingAudioDescription->set_codecs(codecs); - incomingAudioDescription->set_bandwidth(1032000); + incomingAudioDescription->set_bandwidth(-1); - _outgoingAudioChannel->SetPayloadTypeDemuxingEnabled(false); - _outgoingAudioChannel->SetLocalContent(outgoingAudioDescription.get(), webrtc::SdpType::kOffer, nullptr); - _outgoingAudioChannel->SetRemoteContent(incomingAudioDescription.get(), webrtc::SdpType::kAnswer, nullptr); + _threads->getWorkerThread()->Invoke(RTC_FROM_HERE, [&]() { + _outgoingAudioChannel->SetPayloadTypeDemuxingEnabled(false); + _outgoingAudioChannel->SetLocalContent(outgoingAudioDescription.get(), webrtc::SdpType::kOffer, nullptr); + _outgoingAudioChannel->SetRemoteContent(incomingAudioDescription.get(), webrtc::SdpType::kAnswer, nullptr); + }); - _outgoingAudioChannel->SignalSentPacket().connect(this, &OutgoingAudioChannel::OnSentPacket_w); + //_outgoingAudioChannel->SignalSentPacket().connect(this, &OutgoingAudioChannel::OnSentPacket_w); //_outgoingAudioChannel->UpdateRtpTransport(nullptr); setIsMuted(false); } ~OutgoingAudioChannel() { - _outgoingAudioChannel->SignalSentPacket().disconnect(this); - _outgoingAudioChannel->media_channel()->SetAudioSend(_ssrc, false, nullptr, _audioSource); + //_outgoingAudioChannel->SignalSentPacket().disconnect(this); _outgoingAudioChannel->Enable(false); _channelManager->DestroyVoiceChannel(_outgoingAudioChannel); _outgoingAudioChannel = nullptr; @@ -617,10 +621,12 @@ public: void setIsMuted(bool isMuted) { if (_isMuted != isMuted) { - _isMuted = false; + _isMuted = isMuted; _outgoingAudioChannel->Enable(!_isMuted); - _outgoingAudioChannel->media_channel()->SetAudioSend(_ssrc, !_isMuted, nullptr, _audioSource); + _threads->getWorkerThread()->Invoke(RTC_FROM_HERE, [&]() { + _outgoingAudioChannel->media_channel()->SetAudioSend(_ssrc, !_isMuted, nullptr, _audioSource); + }); } } @@ -630,6 +636,7 @@ private: } private: + std::shared_ptr _threads; uint32_t _ssrc = 0; webrtc::Call *_call = nullptr; cricket::ChannelManager *_channelManager = nullptr; @@ -671,7 +678,7 @@ public: outgoingAudioDescription->set_rtcp_reduced_size(true); outgoingAudioDescription->set_direction(webrtc::RtpTransceiverDirection::kRecvOnly); outgoingAudioDescription->set_codecs(audioCodecs); - outgoingAudioDescription->set_bandwidth(1032000); + outgoingAudioDescription->set_bandwidth(-1); auto incomingAudioDescription = std::make_unique(); for (const auto &rtpExtension : 
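// Sketch only: throughout this patch, channel-description updates
// (SetLocalContent / SetRemoteContent / SetAudioSend) are marshalled onto the
// worker thread with a blocking Invoke. The toy runner below is not
// rtc::Thread; it only demonstrates the "post a closure to the owning thread
// and wait for it to finish" shape that those hunks rely on.
#include <condition_variable>
#include <functional>
#include <mutex>
#include <queue>
#include <thread>

class BlockingTaskRunnerSketch {
public:
    BlockingTaskRunnerSketch() : _thread([this] { run(); }) {}
    ~BlockingTaskRunnerSketch() {
        invoke([this] { _stopping = true; });
        _thread.join();
    }

    // Runs |task| on the owned thread and returns once it has completed.
    void invoke(std::function<void()> task) {
        std::mutex doneMutex;
        std::condition_variable doneCv;
        bool done = false;
        {
            std::lock_guard<std::mutex> lock(_mutex);
            _tasks.push([&] {
                task();
                std::lock_guard<std::mutex> doneLock(doneMutex);
                done = true;
                doneCv.notify_one();
            });
        }
        _cv.notify_one();
        std::unique_lock<std::mutex> lock(doneMutex);
        doneCv.wait(lock, [&] { return done; });
    }

private:
    void run() {
        for (;;) {
            std::function<void()> task;
            {
                std::unique_lock<std::mutex> lock(_mutex);
                _cv.wait(lock, [&] { return !_tasks.empty(); });
                task = std::move(_tasks.front());
                _tasks.pop();
            }
            task();
            if (_stopping) {
                return;
            }
        }
    }

    std::mutex _mutex;
    std::condition_variable _cv;
    std::queue<std::function<void()>> _tasks;
    bool _stopping = false;
    std::thread _thread;
};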
mediaContent.rtpExtensions) { @@ -681,14 +688,16 @@ public: incomingAudioDescription->set_rtcp_reduced_size(true); incomingAudioDescription->set_direction(webrtc::RtpTransceiverDirection::kSendOnly); incomingAudioDescription->set_codecs(audioCodecs); - incomingAudioDescription->set_bandwidth(1032000); + incomingAudioDescription->set_bandwidth(-1); cricket::StreamParams streamParams = cricket::StreamParams::CreateLegacy(mediaContent.ssrc); streamParams.set_stream_ids({ streamId }); incomingAudioDescription->AddStream(streamParams); - _audioChannel->SetPayloadTypeDemuxingEnabled(false); - _audioChannel->SetLocalContent(outgoingAudioDescription.get(), webrtc::SdpType::kOffer, nullptr); - _audioChannel->SetRemoteContent(incomingAudioDescription.get(), webrtc::SdpType::kAnswer, nullptr); + threads->getWorkerThread()->Invoke(RTC_FROM_HERE, [&]() { + _audioChannel->SetPayloadTypeDemuxingEnabled(false); + _audioChannel->SetLocalContent(outgoingAudioDescription.get(), webrtc::SdpType::kOffer, nullptr); + _audioChannel->SetRemoteContent(incomingAudioDescription.get(), webrtc::SdpType::kAnswer, nullptr); + }); outgoingAudioDescription.reset(); incomingAudioDescription.reset(); @@ -696,14 +705,10 @@ public: //std::unique_ptr audioLevelSink(new AudioSinkImpl(onAudioLevelUpdated, _ssrc, std::move(onAudioFrame))); //_audioChannel->media_channel()->SetRawAudioSink(ssrc.networkSsrc, std::move(audioLevelSink)); - _audioChannel->SignalSentPacket().connect(this, &IncomingV2AudioChannel::OnSentPacket_w); - //_audioChannel->UpdateRtpTransport(nullptr); - _audioChannel->Enable(true); } ~IncomingV2AudioChannel() { - _audioChannel->SignalSentPacket().disconnect(this); _audioChannel->Enable(false); _channelManager->DestroyVoiceChannel(_audioChannel); _audioChannel = nullptr; @@ -739,7 +744,7 @@ private: class OutgoingVideoChannel : public sigslot::has_slots<>, public std::enable_shared_from_this { public: - static absl::optional createOutgoingContentDescription(std::vector const &availableVideoFormats) { + static absl::optional createOutgoingContentDescription(std::vector const &availableVideoFormats, bool isScreencast) { signaling::MediaContent mediaContent; auto generator = std::mt19937(std::random_device()()); @@ -758,7 +763,31 @@ public: fidGroup.ssrcs.push_back(mediaContent.ssrc + 1); mediaContent.ssrcGroups.push_back(std::move(fidGroup)); - const auto videoFormats = generateAvailableVideoFormats(availableVideoFormats); + auto unsortedVideoFormats = generateAvailableVideoFormats(availableVideoFormats); + + std::vector formatPreferences; + if (isScreencast) { + formatPreferences.push_back(cricket::kVp8CodecName); + } else { +#ifndef WEBRTC_DISABLE_H265 + formatPreferences.push_back(cricket::kH265CodecName); +#endif + formatPreferences.push_back(cricket::kH264CodecName); + } + + std::vector videoFormats; + for (const auto &name : formatPreferences) { + for (size_t i = 0; i < unsortedVideoFormats.size(); i++) { + if (absl::EqualsIgnoreCase(name, unsortedVideoFormats[i].videoCodec.name)) { + videoFormats.push_back(unsortedVideoFormats[i]); + unsortedVideoFormats.erase(unsortedVideoFormats.begin() + i); + break; + } + } + } + for (const auto &format : unsortedVideoFormats) { + videoFormats.push_back(format); + } for (const auto &format : videoFormats) { signaling::PayloadType videoPayload; @@ -818,14 +847,17 @@ public: rtc::UniqueRandomIdGenerator *randomIdGenerator, webrtc::VideoBitrateAllocatorFactory *videoBitrateAllocatorFactory, std::function rotationUpdated, - NegotiatedMediaContent const &mediaContent + 
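// Sketch only: outgoing video content is now built from a preference-ordered
// codec list -- VP8 first for screen sharing, H.265 (when compiled in) and
// then H.264 for camera video -- with every remaining supported codec
// appended afterwards in its original order. A self-contained model of that
// reordering (the real code compares names case-insensitively):
#include <string>
#include <vector>

inline std::vector<std::string> orderByPreference(
        std::vector<std::string> supported, bool isScreencast) {
    std::vector<std::string> preferences;
    if (isScreencast) {
        preferences.push_back("VP8");
    } else {
        preferences.push_back("H265");  // skipped when H.265 support is compiled out
        preferences.push_back("H264");
    }

    std::vector<std::string> ordered;
    for (const auto &name : preferences) {
        for (size_t i = 0; i < supported.size(); i++) {
            if (supported[i] == name) {
                ordered.push_back(supported[i]);
                supported.erase(supported.begin() + i);
                break;
            }
        }
    }
    for (const auto &name : supported) {
        ordered.push_back(name);  // non-preferred codecs keep their order
    }
    return ordered;
}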
NegotiatedMediaContent const &mediaContent, + bool isScreencast ) : _threads(threads), _mainSsrc(mediaContent.ssrc), _call(call), _channelManager(channelManager), _rotationUpdated(rotationUpdated) { - _outgoingVideoChannel = _channelManager->CreateVideoChannel(call, cricket::MediaConfig(), rtpTransport, threads->getMediaThread(), "out1", false, NativeNetworkingImpl::getDefaulCryptoOptions(), randomIdGenerator, cricket::VideoOptions(), videoBitrateAllocatorFactory); + cricket::VideoOptions videoOptions; + videoOptions.is_screencast = isScreencast; + _outgoingVideoChannel = _channelManager->CreateVideoChannel(call, cricket::MediaConfig(), rtpTransport, threads->getMediaThread(), "out" + intToString(mediaContent.ssrc), false, NativeNetworkingImpl::getDefaulCryptoOptions(), randomIdGenerator, videoOptions, videoBitrateAllocatorFactory); auto videoCodecs = mediaContent.codecs; @@ -838,7 +870,7 @@ public: outgoingVideoDescription->set_rtcp_reduced_size(true); outgoingVideoDescription->set_direction(webrtc::RtpTransceiverDirection::kSendOnly); outgoingVideoDescription->set_codecs(videoCodecs); - outgoingVideoDescription->set_bandwidth(1032000); + outgoingVideoDescription->set_bandwidth(-1); cricket::StreamParams videoSendStreamParams; @@ -863,26 +895,26 @@ public: incomingVideoDescription->set_rtcp_reduced_size(true); incomingVideoDescription->set_direction(webrtc::RtpTransceiverDirection::kRecvOnly); incomingVideoDescription->set_codecs(videoCodecs); - incomingVideoDescription->set_bandwidth(1032000); + incomingVideoDescription->set_bandwidth(-1); - _outgoingVideoChannel->SetPayloadTypeDemuxingEnabled(false); - _outgoingVideoChannel->SetLocalContent(outgoingVideoDescription.get(), webrtc::SdpType::kOffer, nullptr); - _outgoingVideoChannel->SetRemoteContent(incomingVideoDescription.get(), webrtc::SdpType::kAnswer, nullptr); + threads->getWorkerThread()->Invoke(RTC_FROM_HERE, [&]() { + _outgoingVideoChannel->SetPayloadTypeDemuxingEnabled(false); + _outgoingVideoChannel->SetLocalContent(outgoingVideoDescription.get(), webrtc::SdpType::kOffer, nullptr); + _outgoingVideoChannel->SetRemoteContent(incomingVideoDescription.get(), webrtc::SdpType::kAnswer, nullptr); - webrtc::RtpParameters rtpParameters = _outgoingVideoChannel->media_channel()->GetRtpSendParameters(mediaContent.ssrc); + webrtc::RtpParameters rtpParameters = _outgoingVideoChannel->media_channel()->GetRtpSendParameters(mediaContent.ssrc); - _outgoingVideoChannel->media_channel()->SetRtpSendParameters(mediaContent.ssrc, rtpParameters); - - _outgoingVideoChannel->SignalSentPacket().connect(this, &OutgoingVideoChannel::OnSentPacket_w); - //_outgoingVideoChannel->UpdateRtpTransport(nullptr); + _outgoingVideoChannel->media_channel()->SetRtpSendParameters(mediaContent.ssrc, rtpParameters); + }); _outgoingVideoChannel->Enable(false); - _outgoingVideoChannel->media_channel()->SetVideoSend(mediaContent.ssrc, NULL, nullptr); + + threads->getWorkerThread()->Invoke(RTC_FROM_HERE, [&]() { + _outgoingVideoChannel->media_channel()->SetVideoSend(mediaContent.ssrc, NULL, nullptr); + }); } ~OutgoingVideoChannel() { - _outgoingVideoChannel->SignalSentPacket().disconnect(this); - _outgoingVideoChannel->media_channel()->SetVideoSend(_mainSsrc, nullptr, nullptr); _outgoingVideoChannel->Enable(false); _channelManager->DestroyVideoChannel(_outgoingVideoChannel); _outgoingVideoChannel = nullptr; @@ -894,7 +926,10 @@ public: if (_videoCapture) { _outgoingVideoChannel->Enable(true); auto videoCaptureImpl = GetVideoCaptureAssumingSameThread(_videoCapture.get()); - 
_outgoingVideoChannel->media_channel()->SetVideoSend(_mainSsrc, NULL, videoCaptureImpl->source()); + + _threads->getWorkerThread()->Invoke(RTC_FROM_HERE, [&]() { + _outgoingVideoChannel->media_channel()->SetVideoSend(_mainSsrc, NULL, videoCaptureImpl->source()); + }); const auto weak = std::weak_ptr(shared_from_this()); videoCaptureImpl->setRotationUpdated([threads = _threads, weak](int angle) { @@ -958,7 +993,10 @@ public: } else { _videoRotation = signaling::MediaStateMessage::VideoRotation::Rotation0; _outgoingVideoChannel->Enable(false); - _outgoingVideoChannel->media_channel()->SetVideoSend(_mainSsrc, NULL, nullptr); + + _threads->getWorkerThread()->Invoke(RTC_FROM_HERE, [&]() { + _outgoingVideoChannel->media_channel()->SetVideoSend(_mainSsrc, NULL, nullptr); + }); } } @@ -1044,16 +1082,15 @@ public: webrtc::RtpTransport *rtpTransport, rtc::UniqueRandomIdGenerator *randomIdGenerator, NegotiatedMediaContent const &mediaContent, + std::string const &streamId, std::shared_ptr threads) : _channelManager(channelManager), _call(call) { _videoSink.reset(new VideoSinkImpl()); - std::string streamId = "1"; - _videoBitrateAllocatorFactory = webrtc::CreateBuiltinVideoBitrateAllocatorFactory(); - _videoChannel = _channelManager->CreateVideoChannel(call, cricket::MediaConfig(), rtpTransport, threads->getMediaThread(), "1", false, NativeNetworkingImpl::getDefaulCryptoOptions(), randomIdGenerator, cricket::VideoOptions(), _videoBitrateAllocatorFactory.get()); + _videoChannel = _channelManager->CreateVideoChannel(call, cricket::MediaConfig(), rtpTransport, threads->getMediaThread(), streamId, false, NativeNetworkingImpl::getDefaulCryptoOptions(), randomIdGenerator, cricket::VideoOptions(), _videoBitrateAllocatorFactory.get()); std::vector videoCodecs = mediaContent.codecs; @@ -1065,7 +1102,7 @@ public: outgoingVideoDescription->set_rtcp_reduced_size(true); outgoingVideoDescription->set_direction(webrtc::RtpTransceiverDirection::kRecvOnly); outgoingVideoDescription->set_codecs(videoCodecs); - outgoingVideoDescription->set_bandwidth(1032000); + outgoingVideoDescription->set_bandwidth(-1); cricket::StreamParams videoRecvStreamParams; @@ -1095,18 +1132,17 @@ public: incomingVideoDescription->set_rtcp_reduced_size(true); incomingVideoDescription->set_direction(webrtc::RtpTransceiverDirection::kSendOnly); incomingVideoDescription->set_codecs(videoCodecs); - incomingVideoDescription->set_bandwidth(1032000); + incomingVideoDescription->set_bandwidth(-1); incomingVideoDescription->AddStream(videoRecvStreamParams); - _videoChannel->SetPayloadTypeDemuxingEnabled(false); - _videoChannel->SetLocalContent(outgoingVideoDescription.get(), webrtc::SdpType::kOffer, nullptr); - _videoChannel->SetRemoteContent(incomingVideoDescription.get(), webrtc::SdpType::kAnswer, nullptr); + threads->getWorkerThread()->Invoke(RTC_FROM_HERE, [&]() { + _videoChannel->SetPayloadTypeDemuxingEnabled(false); + _videoChannel->SetLocalContent(outgoingVideoDescription.get(), webrtc::SdpType::kOffer, nullptr); + _videoChannel->SetRemoteContent(incomingVideoDescription.get(), webrtc::SdpType::kAnswer, nullptr); - _videoChannel->media_channel()->SetSink(_mainVideoSsrc, _videoSink.get()); - - _videoChannel->SignalSentPacket().connect(this, &IncomingV2VideoChannel::OnSentPacket_w); - //_videoChannel->UpdateRtpTransport(nullptr); + _videoChannel->media_channel()->SetSink(_mainVideoSsrc, _videoSink.get()); + }); _videoChannel->Enable(true); } @@ -1162,6 +1198,20 @@ public: _networking->perform(RTC_FROM_HERE, [](NativeNetworkingImpl *networking) { 
networking->stop(); }); + + _incomingAudioChannel.reset(); + _incomingVideoChannel.reset(); + _incomingScreencastChannel.reset(); + _outgoingAudioChannel.reset(); + _outgoingVideoChannel.reset(); + _outgoingScreencastChannel.reset(); + _currentSink.reset(); + + _threads->getWorkerThread()->Invoke(RTC_FROM_HERE, [&]() { + _channelManager.reset(); + _call.reset(); + _audioDeviceModule = nullptr; + }); _threads->getNetworkThread()->Invoke(RTC_FROM_HERE, []() { }); } @@ -1170,7 +1220,7 @@ public: const auto weak = std::weak_ptr(shared_from_this()); _networking.reset(new ThreadLocalObject(_threads->getNetworkThread(), [weak, threads = _threads, isOutgoing = _encryptionKey.isOutgoing, rtcServers = _rtcServers]() { - return new NativeNetworkingImpl((NativeNetworkingImpl::Configuration){ + return new NativeNetworkingImpl(NativeNetworkingImpl::Configuration{ .isOutgoing = isOutgoing, .enableStunMarking = false, .enableTCP = false, @@ -1204,13 +1254,11 @@ public: }); }, .rtcpPacketReceived = [threads, weak](rtc::CopyOnWriteBuffer const &packet, int64_t timestamp) { - threads->getMediaThread()->PostTask(RTC_FROM_HERE, [=] { - const auto strong = weak.lock(); - if (!strong) { - return; - } - strong->_call->Receiver()->DeliverPacket(webrtc::MediaType::ANY, packet, timestamp); - }); + const auto strong = weak.lock(); + if (!strong) { + return; + } + strong->_call->Receiver()->DeliverPacket(webrtc::MediaType::ANY, packet, timestamp); }, .dataChannelStateUpdated = [threads, weak](bool isDataChannelOpen) { threads->getMediaThread()->PostTask(RTC_FROM_HERE, [=] { @@ -1236,40 +1284,42 @@ public: PlatformInterface::SharedInstance()->configurePlatformAudio(); - cricket::MediaEngineDependencies mediaDeps; - mediaDeps.task_queue_factory = _taskQueueFactory.get(); - mediaDeps.audio_encoder_factory = webrtc::CreateAudioEncoderFactory(); - mediaDeps.audio_decoder_factory = webrtc::CreateAudioDecoderFactory(); - - mediaDeps.video_encoder_factory = PlatformInterface::SharedInstance()->makeVideoEncoderFactory(); - mediaDeps.video_decoder_factory = PlatformInterface::SharedInstance()->makeVideoDecoderFactory(); - - _audioDeviceModule = createAudioDeviceModule(); - if (!_audioDeviceModule) { - return; - } - mediaDeps.adm = _audioDeviceModule; - - _availableVideoFormats = mediaDeps.video_encoder_factory->GetSupportedFormats(); - - std::unique_ptr mediaEngine = cricket::CreateMediaEngine(std::move(mediaDeps)); - - _channelManager = cricket::ChannelManager::Create( - std::move(mediaEngine), - std::make_unique(), - true, - _threads->getMediaThread(), - _threads->getNetworkThread() - ); - //setAudioInputDevice(_initialInputDeviceId); //setAudioOutputDevice(_initialOutputDeviceId); - webrtc::Call::Config callConfig(_eventLog.get()); - callConfig.task_queue_factory = _taskQueueFactory.get(); - callConfig.trials = &_fieldTrials; - callConfig.audio_state = _channelManager->media_engine()->voice().GetAudioState(); - _call.reset(webrtc::Call::Create(callConfig)); + _threads->getWorkerThread()->Invoke(RTC_FROM_HERE, [&]() { + cricket::MediaEngineDependencies mediaDeps; + mediaDeps.task_queue_factory = _taskQueueFactory.get(); + mediaDeps.audio_encoder_factory = webrtc::CreateAudioEncoderFactory(); + mediaDeps.audio_decoder_factory = webrtc::CreateAudioDecoderFactory(); + + mediaDeps.video_encoder_factory = PlatformInterface::SharedInstance()->makeVideoEncoderFactory(true); + mediaDeps.video_decoder_factory = PlatformInterface::SharedInstance()->makeVideoDecoderFactory(); + + _audioDeviceModule = createAudioDeviceModule(); + /*if 
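// Sketch only: the networking configuration above drops the C-style compound
// literal "(NativeNetworkingImpl::Configuration){ ... }" (a GNU extension in
// C++) in favor of plain aggregate initialization with designated
// initializers, which is standard in C++20 and widely supported as an
// extension before that. Minimal illustration with a stand-in struct:
struct ConfigSketch {
    bool isOutgoing = false;
    bool enableStunMarking = false;
    bool enableTCP = false;
};

inline ConfigSketch makeConfigSketch(bool isOutgoing) {
    return ConfigSketch{
        .isOutgoing = isOutgoing,
        .enableStunMarking = false,
        .enableTCP = false,
    };
}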
(!_audioDeviceModule) { + return; + }*/ + mediaDeps.adm = _audioDeviceModule; + + _availableVideoFormats = mediaDeps.video_encoder_factory->GetSupportedFormats(); + + std::unique_ptr mediaEngine = cricket::CreateMediaEngine(std::move(mediaDeps)); + + _channelManager = cricket::ChannelManager::Create( + std::move(mediaEngine), + true, + _threads->getWorkerThread(), + _threads->getNetworkThread() + ); + + webrtc::Call::Config callConfig(_eventLog.get()); + callConfig.task_queue_factory = _taskQueueFactory.get(); + callConfig.trials = &_fieldTrials; + callConfig.audio_state = _channelManager->media_engine()->voice().GetAudioState(); + + _call.reset(webrtc::Call::Create(callConfig, webrtc::Clock::GetRealTimeClock(), _threads->getSharedModuleThread(), webrtc::ProcessThread::Create("PacerThread"))); + }); _uniqueRandomIdGenerator.reset(new rtc::UniqueRandomIdGenerator()); @@ -1313,7 +1363,8 @@ public: if (_encryptionKey.isOutgoing) { _outgoingAudioContent = OutgoingAudioChannel::createOutgoingContentDescription(); - _outgoingVideoContent = OutgoingVideoChannel::createOutgoingContentDescription(_availableVideoFormats); + _outgoingVideoContent = OutgoingVideoChannel::createOutgoingContentDescription(_availableVideoFormats, false); + _outgoingScreencastContent = OutgoingVideoChannel::createOutgoingContentDescription(_availableVideoFormats, true); sendInitialSetup(); } @@ -1339,7 +1390,8 @@ public: strong->sendMediaState(); }); }, - _negotiatedOutgoingVideoContent.value() + _negotiatedOutgoingVideoContent.value(), + false )); if (_videoCapture) { @@ -1347,6 +1399,34 @@ public: } } + if (_negotiatedOutgoingScreencastContent) { + const auto weak = std::weak_ptr(shared_from_this()); + + _outgoingScreencastChannel.reset(new OutgoingVideoChannel( + _threads, + _channelManager.get(), + _call.get(), + _rtpTransport, + _uniqueRandomIdGenerator.get(), + _videoBitrateAllocatorFactory.get(), + [threads = _threads, weak]() { + threads->getMediaThread()->PostTask(RTC_FROM_HERE, [=] { + const auto strong = weak.lock(); + if (!strong) { + return; + } + strong->sendMediaState(); + }); + }, + _negotiatedOutgoingScreencastContent.value(), + true + )); + + if (_screencastCapture) { + _outgoingScreencastChannel->setVideoCapture(_screencastCapture); + } + } + if (_negotiatedOutgoingAudioContent) { _outgoingAudioChannel.reset(new OutgoingAudioChannel( _call.get(), @@ -1394,6 +1474,9 @@ public: if (strong->_outgoingVideoContent) { data.video = strong->_outgoingVideoContent.value(); } + if (strong->_outgoingScreencastContent) { + data.screencast = strong->_outgoingScreencastContent.value(); + } data.ufrag = ufrag; data.pwd = pwd; @@ -1529,11 +1612,13 @@ public: _rtpTransport, _uniqueRandomIdGenerator.get(), incomingVideoContent, + "1", _threads )); + _incomingVideoChannel->addSink(_currentSink); } } else { - const auto generatedOutgoingContent = OutgoingVideoChannel::createOutgoingContentDescription(_availableVideoFormats); + const auto generatedOutgoingContent = OutgoingVideoChannel::createOutgoingContentDescription(_availableVideoFormats, false); if (generatedOutgoingContent) { _negotiatedOutgoingVideoContent = negotiateMediaContent(generatedOutgoingContent.value(), generatedOutgoingContent.value(), video.value(), true); @@ -1555,8 +1640,68 @@ public: _rtpTransport, _uniqueRandomIdGenerator.get(), incomingVideoContent, + "1", _threads )); + _incomingVideoChannel->addSink(_currentSink); + } + } + } + } + + if (const auto screencast = initialSetup->screencast) { + if (_encryptionKey.isOutgoing) { + if 
(_outgoingScreencastContent) { + _negotiatedOutgoingScreencastContent = negotiateMediaContent(_outgoingScreencastContent.value(), _outgoingScreencastContent.value(), screencast.value(), false); + const auto incomingScreencastContent = negotiateMediaContent(screencast.value(), _outgoingScreencastContent.value(), screencast.value(), false); + + signaling::MediaContent outgoingScreencastContent; + + outgoingScreencastContent.ssrc = _outgoingScreencastContent->ssrc; + outgoingScreencastContent.ssrcGroups = _outgoingScreencastContent->ssrcGroups; + outgoingScreencastContent.rtpExtensions = _negotiatedOutgoingScreencastContent->rtpExtensions; + outgoingScreencastContent.payloadTypes = getPayloadTypesFromVideoCodecs(_negotiatedOutgoingScreencastContent->codecs); + + _outgoingScreencastContent = std::move(outgoingScreencastContent); + + _incomingScreencastChannel.reset(new IncomingV2VideoChannel( + _channelManager.get(), + _call.get(), + _rtpTransport, + _uniqueRandomIdGenerator.get(), + incomingScreencastContent, + "2", + _threads + )); + _incomingScreencastChannel->addSink(_currentSink); + } + } else { + const auto generatedOutgoingContent = OutgoingVideoChannel::createOutgoingContentDescription(_availableVideoFormats, true); + + if (generatedOutgoingContent) { + _negotiatedOutgoingScreencastContent = negotiateMediaContent(generatedOutgoingContent.value(), generatedOutgoingContent.value(), screencast.value(), true); + const auto incomingScreencastContent = negotiateMediaContent(screencast.value(), generatedOutgoingContent.value(), screencast.value(), true); + + if (_negotiatedOutgoingScreencastContent) { + signaling::MediaContent outgoingScreencastContent; + + outgoingScreencastContent.ssrc = generatedOutgoingContent->ssrc; + outgoingScreencastContent.ssrcGroups = generatedOutgoingContent->ssrcGroups; + outgoingScreencastContent.rtpExtensions = _negotiatedOutgoingScreencastContent->rtpExtensions; + outgoingScreencastContent.payloadTypes = getPayloadTypesFromVideoCodecs(_negotiatedOutgoingScreencastContent->codecs); + + _outgoingScreencastContent = std::move(outgoingScreencastContent); + + _incomingScreencastChannel.reset(new IncomingV2VideoChannel( + _channelManager.get(), + _call.get(), + _rtpTransport, + _uniqueRandomIdGenerator.get(), + incomingScreencastContent, + "2", + _threads + )); + _incomingScreencastChannel->addSink(_currentSink); } } } @@ -1611,8 +1756,33 @@ public: } } + VideoState mappedScreencastState; + switch (mediaState->screencastState) { + case signaling::MediaStateMessage::VideoState::Inactive: { + mappedScreencastState = VideoState::Inactive; + break; + } + case signaling::MediaStateMessage::VideoState::Suspended: { + mappedScreencastState = VideoState::Paused; + break; + } + case signaling::MediaStateMessage::VideoState::Active: { + mappedScreencastState = VideoState::Active; + break; + } + default: { + RTC_FATAL() << "Unknown videoState"; + break; + } + } + + VideoState effectiveVideoState = mappedVideoState; + if (mappedScreencastState == VideoState::Active || mappedScreencastState == VideoState::Paused) { + effectiveVideoState = mappedScreencastState; + } + if (_remoteMediaStateUpdated) { - _remoteMediaStateUpdated(mappedAudioState, mappedVideoState); + _remoteMediaStateUpdated(mappedAudioState, effectiveVideoState); } if (_remoteBatteryLevelIsLowUpdated) { @@ -1689,6 +1859,15 @@ public: data.videoState = signaling::MediaStateMessage::VideoState::Inactive; data.videoRotation = signaling::MediaStateMessage::VideoRotation::Rotation0; } + if (_outgoingScreencastChannel) { 
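// Sketch only: the remote media-state handler now folds the screencast state
// into the single VideoState reported upward -- an active or paused
// screencast wins over the camera state. A pure-function restatement of that
// rule (the real code maps signaling Suspended to VideoState::Paused first):
enum class VideoStateSketch { Inactive, Paused, Active };

inline VideoStateSketch effectiveVideoState(VideoStateSketch camera,
                                            VideoStateSketch screencast) {
    if (screencast == VideoStateSketch::Active ||
        screencast == VideoStateSketch::Paused) {
        return screencast;
    }
    return camera;
}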
+ if (_outgoingScreencastChannel->videoCapture()) { + data.screencastState = signaling::MediaStateMessage::VideoState::Active; + } else{ + data.screencastState = signaling::MediaStateMessage::VideoState::Inactive; + } + } else { + data.screencastState = signaling::MediaStateMessage::VideoState::Inactive; + } message.data = std::move(data); sendDataChannelMessage(message); } @@ -1706,6 +1885,7 @@ public: std::string serialized; const auto success = iceCandidate.ToString(&serialized); assert(success); + (void)success; serializedCandidate.sdpString = serialized; @@ -1717,19 +1897,55 @@ public: } void setVideoCapture(std::shared_ptr videoCapture) { - _videoCapture = videoCapture; - - if (_outgoingVideoChannel) { - _outgoingVideoChannel->setVideoCapture(videoCapture); + auto videoCaptureImpl = GetVideoCaptureAssumingSameThread(videoCapture.get()); + if (videoCaptureImpl) { + if (videoCaptureImpl->isScreenCapture()) { + _videoCapture = nullptr; + _screencastCapture = videoCapture; + + if (_outgoingVideoChannel) { + _outgoingVideoChannel->setVideoCapture(nullptr); + } + + if (_outgoingScreencastChannel) { + _outgoingScreencastChannel->setVideoCapture(videoCapture); + } + + sendMediaState(); + adjustBitratePreferences(true); + } else { + _videoCapture = videoCapture; + _screencastCapture = nullptr; + + if (_outgoingVideoChannel) { + _outgoingVideoChannel->setVideoCapture(videoCapture); + } + + if (_outgoingScreencastChannel) { + _outgoingScreencastChannel->setVideoCapture(nullptr); + } + + sendMediaState(); + adjustBitratePreferences(true); + } + } else { + _videoCapture = nullptr; + _screencastCapture = nullptr; + + if (_outgoingVideoChannel) { + _outgoingVideoChannel->setVideoCapture(nullptr); + } + + if (_outgoingScreencastChannel) { + _outgoingScreencastChannel->setVideoCapture(nullptr); + } sendMediaState(); - adjustBitratePreferences(true); } } void setRequestedVideoAspect(float aspect) { - } void setNetworkType(NetworkType networkType) { @@ -1748,10 +1964,14 @@ public: } } - void setIncomingVideoOutput(std::shared_ptr> sink) { + void setIncomingVideoOutput(std::weak_ptr> sink) { + _currentSink = sink; if (_incomingVideoChannel) { _incomingVideoChannel->addSink(sink); } + if (_incomingScreencastChannel) { + _incomingScreencastChannel->addSink(sink); + } } void setAudioInputDevice(std::string id) { @@ -1775,7 +1995,7 @@ public: void adjustBitratePreferences(bool resetStartBitrate) { webrtc::BitrateConstraints preferences; - if (_videoCapture) { + if (_videoCapture || _screencastCapture) { preferences.min_bitrate_bps = 64000; if (resetStartBitrate) { preferences.start_bitrate_bps = (100 + 800 + 32 + 100) * 1000; @@ -1795,9 +2015,13 @@ public: private: rtc::scoped_refptr createAudioDeviceModule() { const auto create = [&](webrtc::AudioDeviceModule::AudioLayer layer) { +#ifdef WEBRTC_IOS + return rtc::make_ref_counted(false, false); +#else return webrtc::AudioDeviceModule::Create( layer, _taskQueueFactory.get()); +#endif }; const auto check = [&](const rtc::scoped_refptr &result) { return (result && result->Init() == 0) ? 
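// Sketch only: setVideoCapture() now routes a capturer to either the camera
// or the screencast channel (detaching the other) based on whether the
// capturer reports itself as a screen capture; a null capturer detaches
// both. The types below are stand-ins, not the real tgcalls channels; in the
// real code the routing is followed by sendMediaState() and, with a live
// capturer, a bitrate-preferences refresh.
#include <memory>

struct CaptureSketch {
    bool isScreenCapture = false;
};

struct OutgoingChannelSketch {
    std::shared_ptr<CaptureSketch> capture;
};

inline void routeCaptureSketch(std::shared_ptr<CaptureSketch> capture,
                               OutgoingChannelSketch &cameraChannel,
                               OutgoingChannelSketch &screencastChannel) {
    if (capture && capture->isScreenCapture) {
        cameraChannel.capture = nullptr;
        screencastChannel.capture = capture;
    } else if (capture) {
        cameraChannel.capture = capture;
        screencastChannel.capture = nullptr;
    } else {
        cameraChannel.capture = nullptr;
        screencastChannel.capture = nullptr;
    }
}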
result : nullptr; @@ -1855,14 +2079,22 @@ private: absl::optional _outgoingVideoContent; absl::optional> _negotiatedOutgoingVideoContent; + absl::optional _outgoingScreencastContent; + absl::optional> _negotiatedOutgoingScreencastContent; + std::shared_ptr _outgoingVideoChannel; + std::shared_ptr _outgoingScreencastChannel; bool _isBatteryLow = false; std::unique_ptr _incomingAudioChannel; std::unique_ptr _incomingVideoChannel; + std::unique_ptr _incomingScreencastChannel; + + std::weak_ptr> _currentSink; std::shared_ptr _videoCapture; + std::shared_ptr _screencastCapture; }; InstanceV2Impl::InstanceV2Impl(Descriptor &&descriptor) { @@ -1918,7 +2150,7 @@ void InstanceV2Impl::setMuteMicrophone(bool muteMicrophone) { }); } -void InstanceV2Impl::setIncomingVideoOutput(std::shared_ptr> sink) { +void InstanceV2Impl::setIncomingVideoOutput(std::weak_ptr> sink) { _internal->perform(RTC_FROM_HERE, [sink](InstanceV2ImplInternal *internal) { internal->setIncomingVideoOutput(sink); }); diff --git a/TMessagesProj/jni/voip/tgcalls/v2/InstanceV2Impl.h b/TMessagesProj/jni/voip/tgcalls/v2/InstanceV2Impl.h index 690a17353..541f09bb2 100644 --- a/TMessagesProj/jni/voip/tgcalls/v2/InstanceV2Impl.h +++ b/TMessagesProj/jni/voip/tgcalls/v2/InstanceV2Impl.h @@ -27,7 +27,7 @@ public: bool supportsVideo() override { return true; } - void setIncomingVideoOutput(std::shared_ptr> sink) override; + void setIncomingVideoOutput(std::weak_ptr> sink) override; void setAudioOutputGainControlEnabled(bool enabled) override; void setEchoCancellationStrength(int strength) override; void setAudioInputDevice(std::string id) override; diff --git a/TMessagesProj/jni/voip/tgcalls/v2/NativeNetworkingImpl.cpp b/TMessagesProj/jni/voip/tgcalls/v2/NativeNetworkingImpl.cpp index c9024c932..f234ae73a 100644 --- a/TMessagesProj/jni/voip/tgcalls/v2/NativeNetworkingImpl.cpp +++ b/TMessagesProj/jni/voip/tgcalls/v2/NativeNetworkingImpl.cpp @@ -45,7 +45,7 @@ _dataChannelMessageReceived(configuration.dataChannelMessageReceived) { _localCertificate = rtc::RTCCertificateGenerator::GenerateCertificate(rtc::KeyParams(rtc::KT_ECDSA), absl::nullopt); - _socketFactory.reset(new rtc::BasicPacketSocketFactory(_threads->getNetworkThread())); + _socketFactory.reset(new rtc::BasicPacketSocketFactory(_threads->getNetworkThread()->socketserver())); _networkManager = std::make_unique(); _asyncResolverFactory = std::make_unique(); diff --git a/TMessagesProj/jni/voip/tgcalls/v2/NativeNetworkingImpl.h b/TMessagesProj/jni/voip/tgcalls/v2/NativeNetworkingImpl.h index ead493519..9383c6beb 100644 --- a/TMessagesProj/jni/voip/tgcalls/v2/NativeNetworkingImpl.h +++ b/TMessagesProj/jni/voip/tgcalls/v2/NativeNetworkingImpl.h @@ -10,7 +10,8 @@ #include "rtc_base/third_party/sigslot/sigslot.h" #include "api/candidate.h" #include "media/base/media_channel.h" -#include "media/sctp/sctp_transport.h" +//#include "media/sctp/sctp_transport.h" +#include "rtc_base/ssl_fingerprint.h" #include "pc/sctp_data_channel.h" #include diff --git a/TMessagesProj/jni/voip/tgcalls/v2/Signaling.cpp b/TMessagesProj/jni/voip/tgcalls/v2/Signaling.cpp index 0de171cc8..a98c3f8be 100644 --- a/TMessagesProj/jni/voip/tgcalls/v2/Signaling.cpp +++ b/TMessagesProj/jni/voip/tgcalls/v2/Signaling.cpp @@ -325,6 +325,10 @@ std::vector InitialSetupMessage_serialize(const InitialSetupMessage * c object.insert(std::make_pair("video", json11::Json(MediaContent_serialize(video.value())))); } + if (const auto screencast = message->screencast) { + object.insert(std::make_pair("screencast", 
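// Sketch only: setIncomingVideoOutput() now takes a std::weak_ptr, so the
// call object no longer keeps the app's renderer alive. A hypothetical
// consumer (not the actual VideoSinkImpl) would lock the pointer each time
// it attaches or delivers, and simply skip a sink that has gone away:
#include <memory>

struct FrameSinkSketch { /* stand-in for rtc::VideoSinkInterface<webrtc::VideoFrame> */ };

class IncomingChannelSketch {
public:
    void addSink(std::weak_ptr<FrameSinkSketch> sink) { _sink = std::move(sink); }
    void onFrame() {
        if (auto strong = _sink.lock()) {
            // deliver the frame to *strong; a dead sink is silently ignored
        }
    }

private:
    std::weak_ptr<FrameSinkSketch> _sink;
};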
json11::Json(MediaContent_serialize(screencast.value())))); + } + auto json = json11::Json(std::move(object)); std::string result = json.dump(); return std::vector(result.begin(), result.end()); @@ -398,6 +402,18 @@ absl::optional InitialSetupMessage_parse(json11::Json::obje } } + const auto screencast = object.find("screencast"); + if (screencast != object.end()) { + if (!screencast->second.is_object()) { + return absl::nullopt; + } + if (const auto parsedScreencast = MediaContent_parse(screencast->second.object_items())) { + message.screencast = parsedScreencast.value(); + } else { + return absl::nullopt; + } + } + return message; } @@ -529,6 +545,27 @@ std::vector MediaStateMessage_serialize(const MediaStateMessage * const } object.insert(std::make_pair("videoRotation", json11::Json(videoRotationValue))); + std::string screencastStateValue; + switch (message->screencastState) { + case MediaStateMessage::VideoState::Inactive: { + screencastStateValue = "inactive"; + break; + } + case MediaStateMessage::VideoState::Suspended: { + screencastStateValue = "suspended"; + break; + } + case MediaStateMessage::VideoState::Active: { + screencastStateValue = "active"; + break; + } + default: { + RTC_FATAL() << "Unknown videoState"; + break; + } + } + object.insert(std::make_pair("screencastState", json11::Json(screencastStateValue))); + auto json = json11::Json(std::move(object)); std::string result = json.dump(); return std::vector(result.begin(), result.end()); @@ -569,6 +606,22 @@ absl::optional MediaStateMessage_parse(json11::Json::object c message.videoState = MediaStateMessage::VideoState::Inactive; } + const auto screencastState = object.find("screencastState"); + if (screencastState != object.end()) { + if (!screencastState->second.is_string()) { + return absl::nullopt; + } + if (screencastState->second.string_value() == "inactive") { + message.screencastState = MediaStateMessage::VideoState::Inactive; + } else if (screencastState->second.string_value() == "suspended") { + message.screencastState = MediaStateMessage::VideoState::Suspended; + } else if (screencastState->second.string_value() == "active") { + message.screencastState = MediaStateMessage::VideoState::Active; + } + } else { + message.screencastState = MediaStateMessage::VideoState::Inactive; + } + const auto videoRotation = object.find("videoRotation"); if (videoRotation != object.end()) { if (!videoRotation->second.is_number()) { diff --git a/TMessagesProj/jni/voip/tgcalls/v2/Signaling.h b/TMessagesProj/jni/voip/tgcalls/v2/Signaling.h index 303d46afa..af957318f 100644 --- a/TMessagesProj/jni/voip/tgcalls/v2/Signaling.h +++ b/TMessagesProj/jni/voip/tgcalls/v2/Signaling.h @@ -59,6 +59,7 @@ struct InitialSetupMessage { std::vector fingerprints; absl::optional audio; absl::optional video; + absl::optional screencast; }; struct CandidatesMessage { @@ -82,6 +83,7 @@ struct MediaStateMessage { bool isMuted = false; VideoState videoState = VideoState::Inactive; VideoRotation videoRotation = VideoRotation::Rotation0; + VideoState screencastState = VideoState::Inactive; bool isBatteryLow = false; }; diff --git a/TMessagesProj/jni/voip/webrtc/absl/algorithm/container.h b/TMessagesProj/jni/voip/webrtc/absl/algorithm/container.h index d72532dec..26b195292 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/algorithm/container.h +++ b/TMessagesProj/jni/voip/webrtc/absl/algorithm/container.h @@ -90,10 +90,10 @@ using ContainerPointerType = // lookup of std::begin and std::end, i.e. 
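// Sketch only: the media-state message now carries a "screencastState"
// string alongside "videoState"; on the receiving side an absent or unknown
// value ends up as Inactive. A minimal, json11-free model of that mapping:
#include <string>

enum class ScreencastStateSketch { Inactive, Suspended, Active };

inline std::string screencastStateToString(ScreencastStateSketch state) {
    switch (state) {
        case ScreencastStateSketch::Inactive: return "inactive";
        case ScreencastStateSketch::Suspended: return "suspended";
        case ScreencastStateSketch::Active: return "active";
    }
    return "inactive";  // unreachable for valid enum values
}

inline ScreencastStateSketch screencastStateFromString(const std::string &value) {
    if (value == "suspended") {
        return ScreencastStateSketch::Suspended;
    }
    if (value == "active") {
        return ScreencastStateSketch::Active;
    }
    return ScreencastStateSketch::Inactive;  // default, matching the parser above
}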
// using std::begin; // using std::end; -// std::foo(begin(c), end(c); +// std::foo(begin(c), end(c)); // becomes // std::foo(container_algorithm_internal::begin(c), -// container_algorithm_internal::end(c)); +// container_algorithm_internal::end(c)); // These are meant for internal use only. template @@ -166,7 +166,7 @@ container_algorithm_internal::ContainerDifferenceType c_distance( // c_all_of() // // Container-based version of the `std::all_of()` function to -// test a condition on all elements within a container. +// test if all elements within a container satisfy a condition. template bool c_all_of(const C& c, Pred&& pred) { return std::all_of(container_algorithm_internal::c_begin(c), @@ -188,7 +188,7 @@ bool c_any_of(const C& c, Pred&& pred) { // c_none_of() // // Container-based version of the `std::none_of()` function to -// test if no elements in a container fulfil a condition. +// test if no elements in a container fulfill a condition. template bool c_none_of(const C& c, Pred&& pred) { return std::none_of(container_algorithm_internal::c_begin(c), @@ -340,24 +340,45 @@ container_algorithm_internal::ContainerDifferenceType c_count_if( // c_mismatch() // // Container-based version of the `std::mismatch()` function to -// return the first element where two ordered containers differ. +// return the first element where two ordered containers differ. Applies `==` to +// the first N elements of `c1` and `c2`, where N = min(size(c1), size(c2)). template container_algorithm_internal::ContainerIterPairType c_mismatch(C1& c1, C2& c2) { - return std::mismatch(container_algorithm_internal::c_begin(c1), - container_algorithm_internal::c_end(c1), - container_algorithm_internal::c_begin(c2)); + auto first1 = container_algorithm_internal::c_begin(c1); + auto last1 = container_algorithm_internal::c_end(c1); + auto first2 = container_algorithm_internal::c_begin(c2); + auto last2 = container_algorithm_internal::c_end(c2); + + for (; first1 != last1 && first2 != last2; ++first1, (void)++first2) { + // Negates equality because Cpp17EqualityComparable doesn't require clients + // to overload both `operator==` and `operator!=`. + if (!(*first1 == *first2)) { + break; + } + } + + return std::make_pair(first1, first2); } // Overload of c_mismatch() for using a predicate evaluation other than `==` as -// the function's test condition. +// the function's test condition. Applies `pred`to the first N elements of `c1` +// and `c2`, where N = min(size(c1), size(c2)). template container_algorithm_internal::ContainerIterPairType -c_mismatch(C1& c1, C2& c2, BinaryPredicate&& pred) { - return std::mismatch(container_algorithm_internal::c_begin(c1), - container_algorithm_internal::c_end(c1), - container_algorithm_internal::c_begin(c2), - std::forward(pred)); +c_mismatch(C1& c1, C2& c2, BinaryPredicate pred) { + auto first1 = container_algorithm_internal::c_begin(c1); + auto last1 = container_algorithm_internal::c_end(c1); + auto first2 = container_algorithm_internal::c_begin(c2); + auto last2 = container_algorithm_internal::c_end(c2); + + for (; first1 != last1 && first2 != last2; ++first1, (void)++first2) { + if (!pred(*first1, *first2)) { + break; + } + } + + return std::make_pair(first1, first2); } // c_equal() @@ -539,12 +560,20 @@ BidirectionalIterator c_move_backward(C&& src, BidirectionalIterator dest) { // c_swap_ranges() // // Container-based version of the `std::swap_ranges()` function to -// swap a container's elements with another container's elements. 
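// Sketch only: the rewritten absl::c_mismatch above iterates while *both*
// ranges still have elements, so a second container shorter than the first
// no longer gets read past its end. Illustrative usage, assuming the absl
// headers from this tree:
#include <cassert>
#include <vector>

#include "absl/algorithm/container.h"

inline void c_mismatch_example() {
    std::vector<int> longer = {1, 2, 3, 4};
    std::vector<int> shorter = {1, 2};

    const auto result = absl::c_mismatch(longer, shorter);
    // The common prefix matches, so iteration stops at the end of the
    // shorter container instead of running off its end.
    assert(result.first == longer.begin() + 2);
    assert(result.second == shorter.end());
}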
+// swap a container's elements with another container's elements. Swaps the +// first N elements of `c1` and `c2`, where N = min(size(c1), size(c2)). template container_algorithm_internal::ContainerIter c_swap_ranges(C1& c1, C2& c2) { - return std::swap_ranges(container_algorithm_internal::c_begin(c1), - container_algorithm_internal::c_end(c1), - container_algorithm_internal::c_begin(c2)); + auto first1 = container_algorithm_internal::c_begin(c1); + auto last1 = container_algorithm_internal::c_end(c1); + auto first2 = container_algorithm_internal::c_begin(c2); + auto last2 = container_algorithm_internal::c_end(c2); + + using std::swap; + for (; first1 != last1 && first2 != last2; ++first1, (void)++first2) { + swap(*first1, *first2); + } + return first2; } // c_transform() @@ -562,16 +591,23 @@ OutputIterator c_transform(const InputSequence& input, OutputIterator output, } // Overload of c_transform() for performing a transformation using a binary -// predicate. +// predicate. Applies `binary_op` to the first N elements of `c1` and `c2`, +// where N = min(size(c1), size(c2)). template OutputIterator c_transform(const InputSequence1& input1, const InputSequence2& input2, OutputIterator output, BinaryOp&& binary_op) { - return std::transform(container_algorithm_internal::c_begin(input1), - container_algorithm_internal::c_end(input1), - container_algorithm_internal::c_begin(input2), output, - std::forward(binary_op)); + auto first1 = container_algorithm_internal::c_begin(input1); + auto last1 = container_algorithm_internal::c_end(input1); + auto first2 = container_algorithm_internal::c_begin(input2); + auto last2 = container_algorithm_internal::c_end(input2); + for (; first1 != last1 && first2 != last2; + ++first1, (void)++first2, ++output) { + *output = binary_op(*first1, *first2); + } + + return output; } // c_replace() @@ -869,11 +905,11 @@ void c_sort(C& c) { // Overload of c_sort() for performing a `comp` comparison other than the // default `operator<`. -template -void c_sort(C& c, Compare&& comp) { +template +void c_sort(C& c, LessThan&& comp) { std::sort(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), - std::forward(comp)); + std::forward(comp)); } // c_stable_sort() @@ -889,11 +925,11 @@ void c_stable_sort(C& c) { // Overload of c_stable_sort() for performing a `comp` comparison other than the // default `operator<`. -template -void c_stable_sort(C& c, Compare&& comp) { +template +void c_stable_sort(C& c, LessThan&& comp) { std::stable_sort(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), - std::forward(comp)); + std::forward(comp)); } // c_is_sorted() @@ -908,11 +944,11 @@ bool c_is_sorted(const C& c) { // c_is_sorted() overload for performing a `comp` comparison other than the // default `operator<`. -template -bool c_is_sorted(const C& c, Compare&& comp) { +template +bool c_is_sorted(const C& c, LessThan&& comp) { return std::is_sorted(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), - std::forward(comp)); + std::forward(comp)); } // c_partial_sort() @@ -930,22 +966,23 @@ void c_partial_sort( // Overload of c_partial_sort() for performing a `comp` comparison other than // the default `operator<`. 
-template +template void c_partial_sort( RandomAccessContainer& sequence, container_algorithm_internal::ContainerIter middle, - Compare&& comp) { + LessThan&& comp) { std::partial_sort(container_algorithm_internal::c_begin(sequence), middle, container_algorithm_internal::c_end(sequence), - std::forward(comp)); + std::forward(comp)); } // c_partial_sort_copy() // // Container-based version of the `std::partial_sort_copy()` -// function to sort elements within a container such that elements before -// `middle` are sorted in ascending order, and return the result within an -// iterator. +// function to sort the elements in the given range `result` within the larger +// `sequence` in ascending order (and using `result` as the output parameter). +// At most min(result.last - result.first, sequence.last - sequence.first) +// elements from the sequence will be stored in the result. template container_algorithm_internal::ContainerIter c_partial_sort_copy(const C& sequence, RandomAccessContainer& result) { @@ -957,15 +994,15 @@ c_partial_sort_copy(const C& sequence, RandomAccessContainer& result) { // Overload of c_partial_sort_copy() for performing a `comp` comparison other // than the default `operator<`. -template +template container_algorithm_internal::ContainerIter c_partial_sort_copy(const C& sequence, RandomAccessContainer& result, - Compare&& comp) { + LessThan&& comp) { return std::partial_sort_copy(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), container_algorithm_internal::c_begin(result), container_algorithm_internal::c_end(result), - std::forward(comp)); + std::forward(comp)); } // c_is_sorted_until() @@ -981,12 +1018,12 @@ container_algorithm_internal::ContainerIter c_is_sorted_until(C& c) { // Overload of c_is_sorted_until() for performing a `comp` comparison other than // the default `operator<`. -template +template container_algorithm_internal::ContainerIter c_is_sorted_until( - C& c, Compare&& comp) { + C& c, LessThan&& comp) { return std::is_sorted_until(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), - std::forward(comp)); + std::forward(comp)); } // c_nth_element() @@ -1006,14 +1043,14 @@ void c_nth_element( // Overload of c_nth_element() for performing a `comp` comparison other than // the default `operator<`. -template +template void c_nth_element( RandomAccessContainer& sequence, container_algorithm_internal::ContainerIter nth, - Compare&& comp) { + LessThan&& comp) { std::nth_element(container_algorithm_internal::c_begin(sequence), nth, container_algorithm_internal::c_end(sequence), - std::forward(comp)); + std::forward(comp)); } //------------------------------------------------------------------------------ @@ -1035,12 +1072,12 @@ container_algorithm_internal::ContainerIter c_lower_bound( // Overload of c_lower_bound() for performing a `comp` comparison other than // the default `operator<`. -template +template container_algorithm_internal::ContainerIter c_lower_bound( - Sequence& sequence, T&& value, Compare&& comp) { + Sequence& sequence, T&& value, LessThan&& comp) { return std::lower_bound(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), - std::forward(value), std::forward(comp)); + std::forward(value), std::forward(comp)); } // c_upper_bound() @@ -1058,12 +1095,12 @@ container_algorithm_internal::ContainerIter c_upper_bound( // Overload of c_upper_bound() for performing a `comp` comparison other than // the default `operator<`. 
-template +template container_algorithm_internal::ContainerIter c_upper_bound( - Sequence& sequence, T&& value, Compare&& comp) { + Sequence& sequence, T&& value, LessThan&& comp) { return std::upper_bound(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), - std::forward(value), std::forward(comp)); + std::forward(value), std::forward(comp)); } // c_equal_range() @@ -1081,12 +1118,12 @@ c_equal_range(Sequence& sequence, T&& value) { // Overload of c_equal_range() for performing a `comp` comparison other than // the default `operator<`. -template +template container_algorithm_internal::ContainerIterPairType -c_equal_range(Sequence& sequence, T&& value, Compare&& comp) { +c_equal_range(Sequence& sequence, T&& value, LessThan&& comp) { return std::equal_range(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), - std::forward(value), std::forward(comp)); + std::forward(value), std::forward(comp)); } // c_binary_search() @@ -1103,12 +1140,12 @@ bool c_binary_search(Sequence&& sequence, T&& value) { // Overload of c_binary_search() for performing a `comp` comparison other than // the default `operator<`. -template -bool c_binary_search(Sequence&& sequence, T&& value, Compare&& comp) { +template +bool c_binary_search(Sequence&& sequence, T&& value, LessThan&& comp) { return std::binary_search(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(value), - std::forward(comp)); + std::forward(comp)); } //------------------------------------------------------------------------------ @@ -1129,14 +1166,14 @@ OutputIterator c_merge(const C1& c1, const C2& c2, OutputIterator result) { // Overload of c_merge() for performing a `comp` comparison other than // the default `operator<`. -template +template OutputIterator c_merge(const C1& c1, const C2& c2, OutputIterator result, - Compare&& comp) { + LessThan&& comp) { return std::merge(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), container_algorithm_internal::c_end(c2), result, - std::forward(comp)); + std::forward(comp)); } // c_inplace_merge() @@ -1152,13 +1189,13 @@ void c_inplace_merge(C& c, // Overload of c_inplace_merge() for performing a merge using a `comp` other // than `operator<`. -template +template void c_inplace_merge(C& c, container_algorithm_internal::ContainerIter middle, - Compare&& comp) { + LessThan&& comp) { std::inplace_merge(container_algorithm_internal::c_begin(c), middle, container_algorithm_internal::c_end(c), - std::forward(comp)); + std::forward(comp)); } // c_includes() @@ -1176,13 +1213,13 @@ bool c_includes(const C1& c1, const C2& c2) { // Overload of c_includes() for performing a merge using a `comp` other than // `operator<`. -template -bool c_includes(const C1& c1, const C2& c2, Compare&& comp) { +template +bool c_includes(const C1& c1, const C2& c2, LessThan&& comp) { return std::includes(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), container_algorithm_internal::c_end(c2), - std::forward(comp)); + std::forward(comp)); } // c_set_union() @@ -1206,7 +1243,7 @@ OutputIterator c_set_union(const C1& c1, const C2& c2, OutputIterator output) { // Overload of c_set_union() for performing a merge using a `comp` other than // `operator<`. 
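A short sketch of the comparator-taking search overloads (hypothetical data): the container must already be ordered by the same comparator that is passed in, here a descending order.

#include <vector>
#include "absl/algorithm/container.h"

void DescendingSearchSketch() {
  std::vector<int> v = {9, 7, 5, 3, 1};  // sorted with `greater`
  const auto greater = [](int a, int b) { return a > b; };
  const bool found = absl::c_binary_search(v, 5, greater);  // true
  const auto it = absl::c_lower_bound(v, 5, greater);       // points at the 5
  (void)found;
  (void)it;
}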
-template ::value, void>::type, @@ -1214,18 +1251,18 @@ template ::value, void>::type> OutputIterator c_set_union(const C1& c1, const C2& c2, OutputIterator output, - Compare&& comp) { + LessThan&& comp) { return std::set_union(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), container_algorithm_internal::c_end(c2), output, - std::forward(comp)); + std::forward(comp)); } // c_set_intersection() // // Container-based version of the `std::set_intersection()` function -// to return an iterator containing the intersection of two containers. +// to return an iterator containing the intersection of two sorted containers. template ::value, @@ -1235,6 +1272,11 @@ template ::type> OutputIterator c_set_intersection(const C1& c1, const C2& c2, OutputIterator output) { + // In debug builds, ensure that both containers are sorted with respect to the + // default comparator. std::set_intersection requires the containers be sorted + // using operator<. + assert(absl::c_is_sorted(c1)); + assert(absl::c_is_sorted(c2)); return std::set_intersection(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), @@ -1243,7 +1285,7 @@ OutputIterator c_set_intersection(const C1& c1, const C2& c2, // Overload of c_set_intersection() for performing a merge using a `comp` other // than `operator<`. -template ::value, void>::type, @@ -1251,12 +1293,17 @@ template ::value, void>::type> OutputIterator c_set_intersection(const C1& c1, const C2& c2, - OutputIterator output, Compare&& comp) { + OutputIterator output, LessThan&& comp) { + // In debug builds, ensure that both containers are sorted with respect to the + // default comparator. std::set_intersection requires the containers be sorted + // using the same comparator. + assert(absl::c_is_sorted(c1, comp)); + assert(absl::c_is_sorted(c2, comp)); return std::set_intersection(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), container_algorithm_internal::c_end(c2), output, - std::forward(comp)); + std::forward(comp)); } // c_set_difference() @@ -1281,7 +1328,7 @@ OutputIterator c_set_difference(const C1& c1, const C2& c2, // Overload of c_set_difference() for performing a merge using a `comp` other // than `operator<`. -template ::value, void>::type, @@ -1289,12 +1336,12 @@ template ::value, void>::type> OutputIterator c_set_difference(const C1& c1, const C2& c2, - OutputIterator output, Compare&& comp) { + OutputIterator output, LessThan&& comp) { return std::set_difference(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), container_algorithm_internal::c_end(c2), output, - std::forward(comp)); + std::forward(comp)); } // c_set_symmetric_difference() @@ -1320,7 +1367,7 @@ OutputIterator c_set_symmetric_difference(const C1& c1, const C2& c2, // Overload of c_set_symmetric_difference() for performing a merge using a // `comp` other than `operator<`. 
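The assertions added above require both inputs to be sorted; a minimal sketch with hypothetical data:

#include <iterator>
#include <vector>
#include "absl/algorithm/container.h"

void SetIntersectionSketch() {
  const std::vector<int> a = {1, 2, 3, 5, 8};  // sorted, as the asserts check
  const std::vector<int> b = {2, 3, 4, 8};     // sorted
  std::vector<int> both;
  absl::c_set_intersection(a, b, std::back_inserter(both));
  // both == {2, 3, 8}
}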
-template ::value, void>::type, @@ -1329,13 +1376,13 @@ template ::type> OutputIterator c_set_symmetric_difference(const C1& c1, const C2& c2, OutputIterator output, - Compare&& comp) { + LessThan&& comp) { return std::set_symmetric_difference( container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), container_algorithm_internal::c_end(c2), output, - std::forward(comp)); + std::forward(comp)); } //------------------------------------------------------------------------------ @@ -1354,11 +1401,11 @@ void c_push_heap(RandomAccessContainer& sequence) { // Overload of c_push_heap() for performing a push operation on a heap using a // `comp` other than `operator<`. -template -void c_push_heap(RandomAccessContainer& sequence, Compare&& comp) { +template +void c_push_heap(RandomAccessContainer& sequence, LessThan&& comp) { std::push_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), - std::forward(comp)); + std::forward(comp)); } // c_pop_heap() @@ -1373,11 +1420,11 @@ void c_pop_heap(RandomAccessContainer& sequence) { // Overload of c_pop_heap() for performing a pop operation on a heap using a // `comp` other than `operator<`. -template -void c_pop_heap(RandomAccessContainer& sequence, Compare&& comp) { +template +void c_pop_heap(RandomAccessContainer& sequence, LessThan&& comp) { std::pop_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), - std::forward(comp)); + std::forward(comp)); } // c_make_heap() @@ -1392,11 +1439,11 @@ void c_make_heap(RandomAccessContainer& sequence) { // Overload of c_make_heap() for performing heap comparisons using a // `comp` other than `operator<` -template -void c_make_heap(RandomAccessContainer& sequence, Compare&& comp) { +template +void c_make_heap(RandomAccessContainer& sequence, LessThan&& comp) { std::make_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), - std::forward(comp)); + std::forward(comp)); } // c_sort_heap() @@ -1411,11 +1458,11 @@ void c_sort_heap(RandomAccessContainer& sequence) { // Overload of c_sort_heap() for performing heap comparisons using a // `comp` other than `operator<` -template -void c_sort_heap(RandomAccessContainer& sequence, Compare&& comp) { +template +void c_sort_heap(RandomAccessContainer& sequence, LessThan&& comp) { std::sort_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), - std::forward(comp)); + std::forward(comp)); } // c_is_heap() @@ -1430,11 +1477,11 @@ bool c_is_heap(const RandomAccessContainer& sequence) { // Overload of c_is_heap() for performing heap comparisons using a // `comp` other than `operator<` -template -bool c_is_heap(const RandomAccessContainer& sequence, Compare&& comp) { +template +bool c_is_heap(const RandomAccessContainer& sequence, LessThan&& comp) { return std::is_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), - std::forward(comp)); + std::forward(comp)); } // c_is_heap_until() @@ -1450,12 +1497,12 @@ c_is_heap_until(RandomAccessContainer& sequence) { // Overload of c_is_heap_until() for performing heap comparisons using a // `comp` other than `operator<` -template +template container_algorithm_internal::ContainerIter -c_is_heap_until(RandomAccessContainer& sequence, Compare&& comp) { +c_is_heap_until(RandomAccessContainer& sequence, LessThan&& comp) { return 
std::is_heap_until(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), - std::forward(comp)); + std::forward(comp)); } //------------------------------------------------------------------------------ @@ -1476,12 +1523,12 @@ container_algorithm_internal::ContainerIter c_min_element( // Overload of c_min_element() for performing a `comp` comparison other than // `operator<`. -template +template container_algorithm_internal::ContainerIter c_min_element( - Sequence& sequence, Compare&& comp) { + Sequence& sequence, LessThan&& comp) { return std::min_element(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), - std::forward(comp)); + std::forward(comp)); } // c_max_element() @@ -1498,12 +1545,12 @@ container_algorithm_internal::ContainerIter c_max_element( // Overload of c_max_element() for performing a `comp` comparison other than // `operator<`. -template +template container_algorithm_internal::ContainerIter c_max_element( - Sequence& sequence, Compare&& comp) { + Sequence& sequence, LessThan&& comp) { return std::max_element(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), - std::forward(comp)); + std::forward(comp)); } // c_minmax_element() @@ -1521,12 +1568,12 @@ c_minmax_element(C& c) { // Overload of c_minmax_element() for performing `comp` comparisons other than // `operator<`. -template +template container_algorithm_internal::ContainerIterPairType -c_minmax_element(C& c, Compare&& comp) { +c_minmax_element(C& c, LessThan&& comp) { return std::minmax_element(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), - std::forward(comp)); + std::forward(comp)); } //------------------------------------------------------------------------------ @@ -1551,15 +1598,15 @@ bool c_lexicographical_compare(Sequence1&& sequence1, Sequence2&& sequence2) { // Overload of c_lexicographical_compare() for performing a lexicographical // comparison using a `comp` operator instead of `operator<`. -template +template bool c_lexicographical_compare(Sequence1&& sequence1, Sequence2&& sequence2, - Compare&& comp) { + LessThan&& comp) { return std::lexicographical_compare( container_algorithm_internal::c_begin(sequence1), container_algorithm_internal::c_end(sequence1), container_algorithm_internal::c_begin(sequence2), container_algorithm_internal::c_end(sequence2), - std::forward(comp)); + std::forward(comp)); } // c_next_permutation() @@ -1575,11 +1622,11 @@ bool c_next_permutation(C& c) { // Overload of c_next_permutation() for performing a lexicographical // comparison using a `comp` operator instead of `operator<`. -template -bool c_next_permutation(C& c, Compare&& comp) { +template +bool c_next_permutation(C& c, LessThan&& comp) { return std::next_permutation(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), - std::forward(comp)); + std::forward(comp)); } // c_prev_permutation() @@ -1595,11 +1642,11 @@ bool c_prev_permutation(C& c) { // Overload of c_prev_permutation() for performing a lexicographical // comparison using a `comp` operator instead of `operator<`. 
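A brief sketch of the comparator overloads in this block (hypothetical strings), using absl::c_minmax_element() with an ordering other than operator<:

#include <string>
#include <vector>
#include "absl/algorithm/container.h"

void MinMaxByLengthSketch() {
  std::vector<std::string> words = {"pear", "fig", "banana"};
  const auto by_length = [](const std::string& a, const std::string& b) {
    return a.size() < b.size();
  };
  const auto minmax = absl::c_minmax_element(words, by_length);
  // *minmax.first == "fig" (shortest), *minmax.second == "banana" (longest)
}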
-template -bool c_prev_permutation(C& c, Compare&& comp) { +template +bool c_prev_permutation(C& c, LessThan&& comp) { return std::prev_permutation(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), - std::forward(comp)); + std::forward(comp)); } //------------------------------------------------------------------------------ diff --git a/TMessagesProj/jni/voip/webrtc/absl/base/attributes.h b/TMessagesProj/jni/voip/webrtc/absl/base/attributes.h index b4bb6cf87..5721356d4 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/base/attributes.h +++ b/TMessagesProj/jni/voip/webrtc/absl/base/attributes.h @@ -18,8 +18,6 @@ // These macros are used within Abseil and allow the compiler to optimize, where // applicable, certain function calls. // -// This file is used for both C and C++! -// // Most macros here are exposing GCC or Clang features, and are stubbed out for // other compilers. // @@ -32,34 +30,12 @@ // of them are not supported in older version of Clang. Thus, we check // `__has_attribute()` first. If the check fails, we check if we are on GCC and // assume the attribute exists on GCC (which is verified on GCC 4.7). -// -// ----------------------------------------------------------------------------- -// Sanitizer Attributes -// ----------------------------------------------------------------------------- -// -// Sanitizer-related attributes are not "defined" in this file (and indeed -// are not defined as such in any file). To utilize the following -// sanitizer-related attributes within your builds, define the following macros -// within your build using a `-D` flag, along with the given value for -// `-fsanitize`: -// -// * `ADDRESS_SANITIZER` + `-fsanitize=address` (Clang, GCC 4.8) -// * `MEMORY_SANITIZER` + `-fsanitize=memory` (Clang-only) -// * `THREAD_SANITIZER + `-fsanitize=thread` (Clang, GCC 4.8+) -// * `UNDEFINED_BEHAVIOR_SANITIZER` + `-fsanitize=undefined` (Clang, GCC 4.9+) -// * `CONTROL_FLOW_INTEGRITY` + -fsanitize=cfi (Clang-only) -// -// Example: -// -// // Enable branches in the Abseil code that are tagged for ASan: -// $ bazel build --copt=-DADDRESS_SANITIZER --copt=-fsanitize=address -// --linkopt=-fsanitize=address *target* -// -// Since these macro names are only supported by GCC and Clang, we only check -// for `__GNUC__` (GCC or Clang) and the above macros. + #ifndef ABSL_BASE_ATTRIBUTES_H_ #define ABSL_BASE_ATTRIBUTES_H_ +#include "absl/base/config.h" + // ABSL_HAVE_ATTRIBUTE // // A function-like feature checking macro that is a wrapper around @@ -143,7 +119,7 @@ #if ABSL_HAVE_ATTRIBUTE(disable_tail_calls) #define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 1 #define ABSL_ATTRIBUTE_NO_TAIL_CALL __attribute__((disable_tail_calls)) -#elif defined(__GNUC__) && !defined(__clang__) +#elif defined(__GNUC__) && !defined(__clang__) && !defined(__e2k__) #define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 1 #define ABSL_ATTRIBUTE_NO_TAIL_CALL \ __attribute__((optimize("no-optimize-sibling-calls"))) @@ -155,14 +131,15 @@ // ABSL_ATTRIBUTE_WEAK // // Tags a function as weak for the purposes of compilation and linking. -// Weak attributes currently do not work properly in LLVM's Windows backend, -// so disable them there. See https://bugs.llvm.org/show_bug.cgi?id=37598 +// Weak attributes did not work properly in LLVM's Windows backend before +// 9.0.0, so disable them there. See https://bugs.llvm.org/show_bug.cgi?id=37598 // for further information. 
// The MinGW compiler doesn't complain about the weak attribute until the link // step, presumably because Windows doesn't use ELF binaries. -#if (ABSL_HAVE_ATTRIBUTE(weak) || \ - (defined(__GNUC__) && !defined(__clang__))) && \ - !(defined(__llvm__) && defined(_WIN32)) && !defined(__MINGW32__) +#if (ABSL_HAVE_ATTRIBUTE(weak) || \ + (defined(__GNUC__) && !defined(__clang__))) && \ + (!defined(_WIN32) || (defined(__clang__) && __clang_major__ >= 9)) && \ + !defined(__MINGW32__) #undef ABSL_ATTRIBUTE_WEAK #define ABSL_ATTRIBUTE_WEAK __attribute__((weak)) #define ABSL_HAVE_ATTRIBUTE_WEAK 1 @@ -234,7 +211,7 @@ // out of bounds or does other scary things with memory. // NOTE: GCC supports AddressSanitizer(asan) since 4.8. // https://gcc.gnu.org/gcc-4.8/changes.html -#if defined(__GNUC__) +#if ABSL_HAVE_ATTRIBUTE(no_sanitize_address) #define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address)) #else #define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS @@ -242,13 +219,13 @@ // ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // -// Tells the MemorySanitizer to relax the handling of a given function. All -// "Use of uninitialized value" warnings from such functions will be suppressed, -// and all values loaded from memory will be considered fully initialized. -// This attribute is similar to the ADDRESS_SANITIZER attribute above, but deals -// with initialized-ness rather than addressability issues. +// Tells the MemorySanitizer to relax the handling of a given function. All "Use +// of uninitialized value" warnings from such functions will be suppressed, and +// all values loaded from memory will be considered fully initialized. This +// attribute is similar to the ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS attribute +// above, but deals with initialized-ness rather than addressability issues. // NOTE: MemorySanitizer(msan) is supported by Clang but not GCC. -#if defined(__clang__) +#if ABSL_HAVE_ATTRIBUTE(no_sanitize_memory) #define ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY __attribute__((no_sanitize_memory)) #else #define ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY @@ -259,7 +236,7 @@ // Tells the ThreadSanitizer to not instrument a given function. // NOTE: GCC supports ThreadSanitizer(tsan) since 4.8. // https://gcc.gnu.org/gcc-4.8/changes.html -#if defined(__GNUC__) +#if ABSL_HAVE_ATTRIBUTE(no_sanitize_thread) #define ABSL_ATTRIBUTE_NO_SANITIZE_THREAD __attribute__((no_sanitize_thread)) #else #define ABSL_ATTRIBUTE_NO_SANITIZE_THREAD @@ -271,8 +248,10 @@ // where certain behavior (eg. division by zero) is being used intentionally. // NOTE: GCC supports UndefinedBehaviorSanitizer(ubsan) since 4.9. // https://gcc.gnu.org/gcc-4.9/changes.html -#if defined(__GNUC__) && \ - (defined(UNDEFINED_BEHAVIOR_SANITIZER) || defined(ADDRESS_SANITIZER)) +#if ABSL_HAVE_ATTRIBUTE(no_sanitize_undefined) +#define ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED \ + __attribute__((no_sanitize_undefined)) +#elif ABSL_HAVE_ATTRIBUTE(no_sanitize) #define ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED \ __attribute__((no_sanitize("undefined"))) #else @@ -283,7 +262,7 @@ // // Tells the ControlFlowIntegrity sanitizer to not instrument a given function. // See https://clang.llvm.org/docs/ControlFlowIntegrity.html for details. -#if defined(__GNUC__) && defined(CONTROL_FLOW_INTEGRITY) +#if ABSL_HAVE_ATTRIBUTE(no_sanitize) #define ABSL_ATTRIBUTE_NO_SANITIZE_CFI __attribute__((no_sanitize("cfi"))) #else #define ABSL_ATTRIBUTE_NO_SANITIZE_CFI @@ -293,7 +272,7 @@ // // Tells the SafeStack to not instrument a given function. 
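A usage sketch for the no-sanitize attribute family rewritten above (the function is hypothetical): the macro expands to the attribute only where ABSL_HAVE_ATTRIBUTE() reports support, and to nothing elsewhere.

#include "absl/base/attributes.h"

// Hypothetical low-level helper that is exempted from AddressSanitizer
// instrumentation; where the attribute is unsupported the macro expands to
// nothing and the function is built normally.
ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS
int ReadWord(const int* p) { return *p; }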
// See https://clang.llvm.org/docs/SafeStack.html for details. -#if defined(__GNUC__) && defined(SAFESTACK_SANITIZER) +#if ABSL_HAVE_ATTRIBUTE(no_sanitize) #define ABSL_ATTRIBUTE_NO_SANITIZE_SAFESTACK \ __attribute__((no_sanitize("safe-stack"))) #else @@ -303,10 +282,7 @@ // ABSL_ATTRIBUTE_RETURNS_NONNULL // // Tells the compiler that a particular function never returns a null pointer. -#if ABSL_HAVE_ATTRIBUTE(returns_nonnull) || \ - (defined(__GNUC__) && \ - (__GNUC__ > 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9)) && \ - !defined(__clang__)) +#if ABSL_HAVE_ATTRIBUTE(returns_nonnull) #define ABSL_ATTRIBUTE_RETURNS_NONNULL __attribute__((returns_nonnull)) #else #define ABSL_ATTRIBUTE_RETURNS_NONNULL @@ -336,15 +312,22 @@ __attribute__((section(#name))) __attribute__((noinline)) #endif - // ABSL_ATTRIBUTE_SECTION_VARIABLE // // Tells the compiler/linker to put a given variable into a section and define // `__start_ ## name` and `__stop_ ## name` symbols to bracket the section. // This functionality is supported by GNU linker. #ifndef ABSL_ATTRIBUTE_SECTION_VARIABLE +#ifdef _AIX +// __attribute__((section(#name))) on AIX is achived by using the `.csect` psudo +// op which includes an additional integer as part of its syntax indcating +// alignment. If data fall under different alignments then you might get a +// compilation error indicating a `Section type conflict`. +#define ABSL_ATTRIBUTE_SECTION_VARIABLE(name) +#else #define ABSL_ATTRIBUTE_SECTION_VARIABLE(name) __attribute__((section(#name))) #endif +#endif // ABSL_DECLARE_ATTRIBUTE_SECTION_VARS // @@ -355,8 +338,8 @@ // a no-op on ELF but not on Mach-O. // #ifndef ABSL_DECLARE_ATTRIBUTE_SECTION_VARS -#define ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name) \ - extern char __start_##name[] ABSL_ATTRIBUTE_WEAK; \ +#define ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name) \ + extern char __start_##name[] ABSL_ATTRIBUTE_WEAK; \ extern char __stop_##name[] ABSL_ATTRIBUTE_WEAK #endif #ifndef ABSL_DEFINE_ATTRIBUTE_SECTION_VARS @@ -417,6 +400,9 @@ // // Tells the compiler to warn about unused results. // +// For code or headers that are assured to only build with C++17 and up, prefer +// just using the standard `[[nodiscard]]` directly over this macro. +// // When annotating a function, it must appear as the first part of the // declaration or definition. The compiler will warn if the return value from // such a function is unused: @@ -443,9 +429,10 @@ // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66425 // // Note: past advice was to place the macro after the argument list. -#if ABSL_HAVE_ATTRIBUTE(nodiscard) -#define ABSL_MUST_USE_RESULT [[nodiscard]] -#elif defined(__clang__) && ABSL_HAVE_ATTRIBUTE(warn_unused_result) +// +// TODO(b/176172494): Use ABSL_HAVE_CPP_ATTRIBUTE(nodiscard) when all code is +// compliant with the stricter [[nodiscard]]. +#if defined(__clang__) && ABSL_HAVE_ATTRIBUTE(warn_unused_result) #define ABSL_MUST_USE_RESULT __attribute__((warn_unused_result)) #else #define ABSL_MUST_USE_RESULT @@ -515,7 +502,7 @@ #define ABSL_XRAY_NEVER_INSTRUMENT [[clang::xray_never_instrument]] #if ABSL_HAVE_CPP_ATTRIBUTE(clang::xray_log_args) #define ABSL_XRAY_LOG_ARGS(N) \ - [[clang::xray_always_instrument, clang::xray_log_args(N)]] + [[clang::xray_always_instrument, clang::xray_log_args(N)]] #else #define ABSL_XRAY_LOG_ARGS(N) [[clang::xray_always_instrument]] #endif @@ -546,6 +533,13 @@ // ABSL_ATTRIBUTE_UNUSED // // Prevents the compiler from complaining about variables that appear unused. 
+// +// For code or headers that are assured to only build with C++17 and up, prefer +// just using the standard '[[maybe_unused]]' directly over this macro. +// +// Due to differences in positioning requirements between the old, compiler +// specific __attribute__ syntax and the now standard [[maybe_unused]], this +// macro does not attempt to take advantage of '[[maybe_unused]]'. #if ABSL_HAVE_ATTRIBUTE(unused) || (defined(__GNUC__) && !defined(__clang__)) #undef ABSL_ATTRIBUTE_UNUSED #define ABSL_ATTRIBUTE_UNUSED __attribute__((__unused__)) @@ -566,13 +560,19 @@ // ABSL_ATTRIBUTE_PACKED // // Instructs the compiler not to use natural alignment for a tagged data -// structure, but instead to reduce its alignment to 1. This attribute can -// either be applied to members of a structure or to a structure in its -// entirety. Applying this attribute (judiciously) to a structure in its -// entirety to optimize the memory footprint of very commonly-used structs is -// fine. Do not apply this attribute to a structure in its entirety if the -// purpose is to control the offsets of the members in the structure. Instead, -// apply this attribute only to structure members that need it. +// structure, but instead to reduce its alignment to 1. +// +// Therefore, DO NOT APPLY THIS ATTRIBUTE TO STRUCTS CONTAINING ATOMICS. Doing +// so can cause atomic variables to be mis-aligned and silently violate +// atomicity on x86. +// +// This attribute can either be applied to members of a structure or to a +// structure in its entirety. Applying this attribute (judiciously) to a +// structure in its entirety to optimize the memory footprint of very +// commonly-used structs is fine. Do not apply this attribute to a structure in +// its entirety if the purpose is to control the offsets of the members in the +// structure. Instead, apply this attribute only to structure members that need +// it. // // When applying ABSL_ATTRIBUTE_PACKED only to specific structure members the // natural alignment of structure members not annotated is preserved. Aligned @@ -594,6 +594,85 @@ #define ABSL_ATTRIBUTE_FUNC_ALIGN(bytes) #endif +// ABSL_FALLTHROUGH_INTENDED +// +// Annotates implicit fall-through between switch labels, allowing a case to +// indicate intentional fallthrough and turn off warnings about any lack of a +// `break` statement. The ABSL_FALLTHROUGH_INTENDED macro should be followed by +// a semicolon and can be used in most places where `break` can, provided that +// no statements exist between it and the next switch label. +// +// Example: +// +// switch (x) { +// case 40: +// case 41: +// if (truth_is_out_there) { +// ++x; +// ABSL_FALLTHROUGH_INTENDED; // Use instead of/along with annotations +// // in comments +// } else { +// return x; +// } +// case 42: +// ... +// +// Notes: When supported, GCC and Clang can issue a warning on switch labels +// with unannotated fallthrough using the warning `-Wimplicit-fallthrough`. See +// clang documentation on language extensions for details: +// https://clang.llvm.org/docs/AttributeReference.html#fallthrough-clang-fallthrough +// +// When used with unsupported compilers, the ABSL_FALLTHROUGH_INTENDED macro has +// no effect on diagnostics. In any case this macro has no effect on runtime +// behavior and performance of code. + +#ifdef ABSL_FALLTHROUGH_INTENDED +#error "ABSL_FALLTHROUGH_INTENDED should not be defined." 
+#elif ABSL_HAVE_CPP_ATTRIBUTE(fallthrough) +#define ABSL_FALLTHROUGH_INTENDED [[fallthrough]] +#elif ABSL_HAVE_CPP_ATTRIBUTE(clang::fallthrough) +#define ABSL_FALLTHROUGH_INTENDED [[clang::fallthrough]] +#elif ABSL_HAVE_CPP_ATTRIBUTE(gnu::fallthrough) +#define ABSL_FALLTHROUGH_INTENDED [[gnu::fallthrough]] +#else +#define ABSL_FALLTHROUGH_INTENDED \ + do { \ + } while (0) +#endif + +// ABSL_DEPRECATED() +// +// Marks a deprecated class, struct, enum, function, method and variable +// declarations. The macro argument is used as a custom diagnostic message (e.g. +// suggestion of a better alternative). +// +// For code or headers that are assured to only build with C++14 and up, prefer +// just using the standard `[[deprecated("message")]]` directly over this macro. +// +// Examples: +// +// class ABSL_DEPRECATED("Use Bar instead") Foo {...}; +// +// ABSL_DEPRECATED("Use Baz() instead") void Bar() {...} +// +// template +// ABSL_DEPRECATED("Use DoThat() instead") +// void DoThis(); +// +// enum FooEnum { +// kBar ABSL_DEPRECATED("Use kBaz instead"), +// }; +// +// Every usage of a deprecated entity will trigger a warning when compiled with +// GCC/Clang's `-Wdeprecated-declarations` option. Google's production toolchain +// turns this warning off by default, instead relying on clang-tidy to report +// new uses of deprecated code. +#if ABSL_HAVE_ATTRIBUTE(deprecated) +#define ABSL_DEPRECATED(message) __attribute__((deprecated(message))) +#else +#define ABSL_DEPRECATED(message) +#endif + // ABSL_CONST_INIT // // A variable declaration annotated with the `ABSL_CONST_INIT` attribute will @@ -620,4 +699,47 @@ #define ABSL_CONST_INIT #endif // ABSL_HAVE_CPP_ATTRIBUTE(clang::require_constant_initialization) +// ABSL_ATTRIBUTE_PURE_FUNCTION +// +// ABSL_ATTRIBUTE_PURE_FUNCTION is used to annotate declarations of "pure" +// functions. A function is pure if its return value is only a function of its +// arguments. The pure attribute prohibits a function from modifying the state +// of the program that is observable by means other than inspecting the +// function's return value. Declaring such functions with the pure attribute +// allows the compiler to avoid emitting some calls in repeated invocations of +// the function with the same argument values. +// +// Example: +// +// ABSL_ATTRIBUTE_PURE_FUNCTION int64_t ToInt64Milliseconds(Duration d); +#if ABSL_HAVE_CPP_ATTRIBUTE(gnu::pure) +#define ABSL_ATTRIBUTE_PURE_FUNCTION [[gnu::pure]] +#elif ABSL_HAVE_ATTRIBUTE(pure) +#define ABSL_ATTRIBUTE_PURE_FUNCTION __attribute__((pure)) +#else +#define ABSL_ATTRIBUTE_PURE_FUNCTION +#endif + +// ABSL_ATTRIBUTE_LIFETIME_BOUND indicates that a resource owned by a function +// parameter or implicit object parameter is retained by the return value of the +// annotated function (or, for a parameter of a constructor, in the value of the +// constructed object). This attribute causes warnings to be produced if a +// temporary object does not live long enough. +// +// When applied to a reference parameter, the referenced object is assumed to be +// retained by the return value of the function. When applied to a non-reference +// parameter (for example, a pointer or a class type), all temporaries +// referenced by the parameter are assumed to be retained by the return value of +// the function. 
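A sketch of how the lifetime-bound annotation described here is attached to a parameter (the function is hypothetical):

#include <string>
#include "absl/base/attributes.h"

// Returns a reference tied to `s`; where [[clang::lifetimebound]] is
// available, binding the result of Identity(std::string("tmp")) to a
// reference produces a compiler warning about the dangling temporary.
const std::string& Identity(
    const std::string& s ABSL_ATTRIBUTE_LIFETIME_BOUND) {
  return s;
}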
+// +// See also the upstream documentation: +// https://clang.llvm.org/docs/AttributeReference.html#lifetimebound +#if ABSL_HAVE_CPP_ATTRIBUTE(clang::lifetimebound) +#define ABSL_ATTRIBUTE_LIFETIME_BOUND [[clang::lifetimebound]] +#elif ABSL_HAVE_ATTRIBUTE(lifetimebound) +#define ABSL_ATTRIBUTE_LIFETIME_BOUND __attribute__((lifetimebound)) +#else +#define ABSL_ATTRIBUTE_LIFETIME_BOUND +#endif + #endif // ABSL_BASE_ATTRIBUTES_H_ diff --git a/TMessagesProj/jni/voip/webrtc/absl/base/call_once.h b/TMessagesProj/jni/voip/webrtc/absl/base/call_once.h index bc5ec9370..96109f537 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/base/call_once.h +++ b/TMessagesProj/jni/voip/webrtc/absl/base/call_once.h @@ -175,17 +175,10 @@ void CallOnceImpl(std::atomic* control, std::memory_order_relaxed) || base_internal::SpinLockWait(control, ABSL_ARRAYSIZE(trans), trans, scheduling_mode) == kOnceInit) { - base_internal::Invoke(std::forward(fn), + base_internal::invoke(std::forward(fn), std::forward(args)...); - // The call to SpinLockWake below is an optimization, because the waiter - // in SpinLockWait is waiting with a short timeout. The atomic load/store - // sequence is slightly faster than an atomic exchange: - // old_control = control->exchange(base_internal::kOnceDone, - // std::memory_order_release); - // We opt for a slightly faster case when there are no waiters, in spite - // of longer tail latency when there are waiters. - old_control = control->load(std::memory_order_relaxed); - control->store(base_internal::kOnceDone, std::memory_order_release); + old_control = + control->exchange(base_internal::kOnceDone, std::memory_order_release); if (old_control == base_internal::kOnceWaiter) { base_internal::SpinLockWake(control, true); } diff --git a/TMessagesProj/jni/voip/webrtc/absl/base/casts.h b/TMessagesProj/jni/voip/webrtc/absl/base/casts.h index 322cc1d24..b99adb069 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/base/casts.h +++ b/TMessagesProj/jni/voip/webrtc/absl/base/casts.h @@ -29,6 +29,10 @@ #include #include +#if defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L +#include // For std::bit_cast. +#endif // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L + #include "absl/base/internal/identity.h" #include "absl/base/macros.h" #include "absl/meta/type_traits.h" @@ -36,19 +40,6 @@ namespace absl { ABSL_NAMESPACE_BEGIN -namespace internal_casts { - -template -struct is_bitcastable - : std::integral_constant< - bool, - sizeof(Dest) == sizeof(Source) && - type_traits_internal::is_trivially_copyable::value && - type_traits_internal::is_trivially_copyable::value && - std::is_default_constructible::value> {}; - -} // namespace internal_casts - // implicit_cast() // // Performs an implicit conversion between types following the language @@ -105,78 +96,83 @@ constexpr To implicit_cast(typename absl::internal::identity_t to) { // bit_cast() // -// Performs a bitwise cast on a type without changing the underlying bit -// representation of that type's value. The two types must be of the same size -// and both types must be trivially copyable. As with most casts, use with -// caution. 
A `bit_cast()` might be needed when you need to temporarily treat a -// type as some other type, such as in the following cases: +// Creates a value of the new type `Dest` whose representation is the same as +// that of the argument, which is of (deduced) type `Source` (a "bitwise cast"; +// every bit in the value representation of the result is equal to the +// corresponding bit in the object representation of the source). Source and +// destination types must be of the same size, and both types must be trivially +// copyable. // -// * Serialization (casting temporarily to `char *` for those purposes is -// always allowed by the C++ standard) -// * Managing the individual bits of a type within mathematical operations -// that are not normally accessible through that type -// * Casting non-pointer types to pointer types (casting the other way is -// allowed by `reinterpret_cast()` but round-trips cannot occur the other -// way). -// -// Example: +// As with most casts, use with caution. A `bit_cast()` might be needed when you +// need to treat a value as the value of some other type, for example, to access +// the individual bits of an object which are not normally accessible through +// the object's type, such as for working with the binary representation of a +// floating point value: // // float f = 3.14159265358979; -// int i = bit_cast(f); +// int i = bit_cast(f); // // i = 0x40490fdb // -// Casting non-pointer types to pointer types and then dereferencing them -// traditionally produces undefined behavior. +// Reinterpreting and accessing a value directly as a different type (as shown +// below) usually results in undefined behavior. // // Example: // // // WRONG -// float f = 3.14159265358979; // WRONG -// int i = * reinterpret_cast(&f); // WRONG +// float f = 3.14159265358979; +// int i = reinterpret_cast(f); // Wrong +// int j = *reinterpret_cast(&f); // Equally wrong +// int k = *bit_cast(&f); // Equally wrong // -// The address-casting method produces undefined behavior according to the ISO -// C++ specification section [basic.lval]. Roughly, this section says: if an -// object in memory has one type, and a program accesses it with a different -// type, the result is undefined behavior for most values of "different type". +// Reinterpret-casting results in undefined behavior according to the ISO C++ +// specification, section [basic.lval]. Roughly, this section says: if an object +// in memory has one type, and a program accesses it with a different type, the +// result is undefined behavior for most "different type". +// +// Using bit_cast on a pointer and then dereferencing it is no better than using +// reinterpret_cast. You should only use bit_cast on the value itself. // // Such casting results in type punning: holding an object in memory of one type // and reading its bits back using a different type. A `bit_cast()` avoids this -// issue by implementing its casts using `memcpy()`, which avoids introducing -// this undefined behavior. +// issue by copying the object representation to a new value, which avoids +// introducing this undefined behavior (since the original value is never +// accessed in the wrong way). // -// NOTE: The requirements here are more strict than the bit_cast of standard -// proposal p0476 due to the need for workarounds and lack of intrinsics. -// Specifically, this implementation also requires `Dest` to be -// default-constructible. 
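A minimal sketch of the well-defined alternative the comment recommends, using the same value as the documentation above:

#include <cstdint>
#include "absl/base/casts.h"

void BitCastSketch() {
  const float f = 3.14159265358979f;
  // Copies f's object representation into a new uint32_t; no type punning
  // through pointers is involved.
  const std::uint32_t bits = absl::bit_cast<std::uint32_t>(f);
  (void)bits;  // bits == 0x40490fdb
}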
-template < - typename Dest, typename Source, - typename std::enable_if::value, - int>::type = 0> +// The requirements of `absl::bit_cast` are more strict than that of +// `std::bit_cast` unless compiler support is available. Specifically, without +// compiler support, this implementation also requires `Dest` to be +// default-constructible. In C++20, `absl::bit_cast` is replaced by +// `std::bit_cast`. +#if defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L + +using std::bit_cast; + +#else // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L + +template ::value && + type_traits_internal::is_trivially_copyable::value +#if !ABSL_HAVE_BUILTIN(__builtin_bit_cast) + && std::is_default_constructible::value +#endif // !ABSL_HAVE_BUILTIN(__builtin_bit_cast) + , + int>::type = 0> +#if ABSL_HAVE_BUILTIN(__builtin_bit_cast) +inline constexpr Dest bit_cast(const Source& source) { + return __builtin_bit_cast(Dest, source); +} +#else // ABSL_HAVE_BUILTIN(__builtin_bit_cast) inline Dest bit_cast(const Source& source) { Dest dest; memcpy(static_cast(std::addressof(dest)), static_cast(std::addressof(source)), sizeof(dest)); return dest; } +#endif // ABSL_HAVE_BUILTIN(__builtin_bit_cast) -// NOTE: This overload is only picked if the requirements of bit_cast are not -// met. It is therefore UB, but is provided temporarily as previous versions of -// this function template were unchecked. Do not use this in new code. -template < - typename Dest, typename Source, - typename std::enable_if< - !internal_casts::is_bitcastable::value, int>::type = 0> -ABSL_DEPRECATED( - "absl::bit_cast type requirements were violated. Update the types being " - "used such that they are the same size and are both TriviallyCopyable.") -inline Dest bit_cast(const Source& source) { - static_assert(sizeof(Dest) == sizeof(Source), - "Source and destination types should have equal sizes."); - - Dest dest; - memcpy(&dest, &source, sizeof(dest)); - return dest; -} +#endif // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L ABSL_NAMESPACE_END } // namespace absl diff --git a/TMessagesProj/jni/voip/webrtc/absl/base/config.h b/TMessagesProj/jni/voip/webrtc/absl/base/config.h index 1ae0ba58f..373aa0ccb 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/base/config.h +++ b/TMessagesProj/jni/voip/webrtc/absl/base/config.h @@ -66,6 +66,35 @@ #include "absl/base/options.h" #include "absl/base/policy_checks.h" +// Abseil long-term support (LTS) releases will define +// `ABSL_LTS_RELEASE_VERSION` to the integer representing the date string of the +// LTS release version, and will define `ABSL_LTS_RELEASE_PATCH_LEVEL` to the +// integer representing the patch-level for that release. +// +// For example, for LTS release version "20300401.2", this would give us +// ABSL_LTS_RELEASE_VERSION == 20300401 && ABSL_LTS_RELEASE_PATCH_LEVEL == 2 +// +// These symbols will not be defined in non-LTS code. +// +// Abseil recommends that clients live-at-head. Therefore, if you are using +// these symbols to assert a minimum version requirement, we recommend you do it +// as +// +// #if defined(ABSL_LTS_RELEASE_VERSION) && ABSL_LTS_RELEASE_VERSION < 20300401 +// #error Project foo requires Abseil LTS version >= 20300401 +// #endif +// +// The `defined(ABSL_LTS_RELEASE_VERSION)` part of the check excludes +// live-at-head clients from the minimum version assertion. +// +// See https://abseil.io/about/releases for more information on Abseil release +// management. 
+// +// LTS releases can be obtained from +// https://github.com/abseil/abseil-cpp/releases. +#undef ABSL_LTS_RELEASE_VERSION +#undef ABSL_LTS_RELEASE_PATCH_LEVEL + // Helper macro to convert a CPP variable to a string literal. #define ABSL_INTERNAL_DO_TOKEN_STR(x) #x #define ABSL_INTERNAL_TOKEN_STR(x) ABSL_INTERNAL_DO_TOKEN_STR(x) @@ -121,10 +150,16 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || #if ABSL_OPTION_USE_INLINE_NAMESPACE == 0 #define ABSL_NAMESPACE_BEGIN #define ABSL_NAMESPACE_END +#define ABSL_INTERNAL_C_SYMBOL(x) x #elif ABSL_OPTION_USE_INLINE_NAMESPACE == 1 #define ABSL_NAMESPACE_BEGIN \ inline namespace ABSL_OPTION_INLINE_NAMESPACE_NAME { #define ABSL_NAMESPACE_END } +#define ABSL_INTERNAL_C_SYMBOL_HELPER_2(x, v) x##_##v +#define ABSL_INTERNAL_C_SYMBOL_HELPER_1(x, v) \ + ABSL_INTERNAL_C_SYMBOL_HELPER_2(x, v) +#define ABSL_INTERNAL_C_SYMBOL(x) \ + ABSL_INTERNAL_C_SYMBOL_HELPER_1(x, ABSL_OPTION_INLINE_NAMESPACE_NAME) #else #error options.h is misconfigured. #endif @@ -154,6 +189,28 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || #define ABSL_INTERNAL_HAS_KEYWORD(x) 0 #endif +#ifdef __has_feature +#define ABSL_HAVE_FEATURE(f) __has_feature(f) +#else +#define ABSL_HAVE_FEATURE(f) 0 +#endif + +// Portable check for GCC minimum version: +// https://gcc.gnu.org/onlinedocs/cpp/Common-Predefined-Macros.html +#if defined(__GNUC__) && defined(__GNUC_MINOR__) +#define ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(x, y) \ + (__GNUC__ > (x) || __GNUC__ == (x) && __GNUC_MINOR__ >= (y)) +#else +#define ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(x, y) 0 +#endif + +#if defined(__clang__) && defined(__clang_major__) && defined(__clang_minor__) +#define ABSL_INTERNAL_HAVE_MIN_CLANG_VERSION(x, y) \ + (__clang_major__ > (x) || __clang_major__ == (x) && __clang_minor__ >= (y)) +#else +#define ABSL_INTERNAL_HAVE_MIN_CLANG_VERSION(x, y) 0 +#endif + // ABSL_HAVE_TLS is defined to 1 when __thread should be supported. // We assume __thread is supported on Linux when compiled with Clang or compiled // against libstdc++ with _GLIBCXX_HAVE_TLS defined. @@ -171,10 +228,9 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || // gcc >= 4.8.1 using libstdc++, and Visual Studio. #ifdef ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE #error ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE cannot be directly set -#elif defined(_LIBCPP_VERSION) || \ - (!defined(__clang__) && defined(__GNUC__) && defined(__GLIBCXX__) && \ - (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))) || \ - defined(_MSC_VER) +#elif defined(_LIBCPP_VERSION) || defined(_MSC_VER) || \ + (!defined(__clang__) && defined(__GLIBCXX__) && \ + ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(4, 8)) #define ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE 1 #endif @@ -187,16 +243,17 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || // // Checks whether `std::is_trivially_copy_assignable` is supported. -// Notes: Clang with libc++ supports these features, as does gcc >= 5.1 with -// either libc++ or libstdc++, and Visual Studio (but not NVCC). +// Notes: Clang with libc++ supports these features, as does gcc >= 7.4 with +// libstdc++, or gcc >= 8.2 with libc++, and Visual Studio (but not NVCC). 
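As a sketch of how the new version-check helpers read in practice (the guarded project macro is hypothetical):

#include "absl/base/config.h"

// True for GCC >= 4.8 or Clang >= 3.6, the kinds of cut-offs this header
// itself uses for its feature checks.
#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(4, 8) || \
    ABSL_INTERNAL_HAVE_MIN_CLANG_VERSION(3, 6)
#define MYPROJ_HAVE_MODERN_COMPILER 1  // hypothetical project-local macro
#else
#define MYPROJ_HAVE_MODERN_COMPILER 0
#endif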
#if defined(ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE) #error ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE cannot be directly set #elif defined(ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE) #error ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE cannot directly set -#elif (defined(__clang__) && defined(_LIBCPP_VERSION)) || \ - (!defined(__clang__) && defined(__GNUC__) && \ - (__GNUC__ > 7 || (__GNUC__ == 7 && __GNUC_MINOR__ >= 4)) && \ - (defined(_LIBCPP_VERSION) || defined(__GLIBCXX__))) || \ +#elif (defined(__clang__) && defined(_LIBCPP_VERSION)) || \ + (!defined(__clang__) && \ + ((ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(7, 4) && defined(__GLIBCXX__)) || \ + (ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(8, 2) && \ + defined(_LIBCPP_VERSION)))) || \ (defined(_MSC_VER) && !defined(__NVCC__)) #define ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE 1 #define ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE 1 @@ -210,6 +267,8 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || #if ABSL_INTERNAL_HAS_KEYWORD(__builtin_LINE) && \ ABSL_INTERNAL_HAS_KEYWORD(__builtin_FILE) #define ABSL_HAVE_SOURCE_LOCATION_CURRENT 1 +#elif ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(5, 0) +#define ABSL_HAVE_SOURCE_LOCATION_CURRENT 1 #endif #endif @@ -226,11 +285,9 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || // * Xcode 9.3 started disallowing `thread_local` for 32-bit iOS simulator // targeting iOS 9.x. // * Xcode 10 moves the deployment target check for iOS < 9.0 to link time -// making __has_feature unreliable there. +// making ABSL_HAVE_FEATURE unreliable there. // -// Otherwise, `__has_feature` is only supported by Clang so it has be inside -// `defined(__APPLE__)` check. -#if __has_feature(cxx_thread_local) && \ +#if ABSL_HAVE_FEATURE(cxx_thread_local) && \ !(TARGET_OS_IPHONE && __IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_9_0) #define ABSL_HAVE_THREAD_LOCAL 1 #endif @@ -307,25 +364,21 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || // For further details, consult the compiler's documentation. #ifdef ABSL_HAVE_EXCEPTIONS #error ABSL_HAVE_EXCEPTIONS cannot be directly set. - -#elif defined(__clang__) - -#if __clang_major__ > 3 || (__clang_major__ == 3 && __clang_minor__ >= 6) +#elif ABSL_INTERNAL_HAVE_MIN_CLANG_VERSION(3, 6) // Clang >= 3.6 -#if __has_feature(cxx_exceptions) -//#define ABSL_HAVE_EXCEPTIONS 1 -#endif // __has_feature(cxx_exceptions) -#else +#if ABSL_HAVE_FEATURE(cxx_exceptions) +#define ABSL_HAVE_EXCEPTIONS 1 +#endif // ABSL_HAVE_FEATURE(cxx_exceptions) +#elif defined(__clang__) // Clang < 3.6 // http://releases.llvm.org/3.6.0/tools/clang/docs/ReleaseNotes.html#the-exceptions-macro -#if defined(__EXCEPTIONS) && __has_feature(cxx_exceptions) -//#define ABSL_HAVE_EXCEPTIONS 1 -#endif // defined(__EXCEPTIONS) && __has_feature(cxx_exceptions) -#endif // __clang_major__ > 3 || (__clang_major__ == 3 && __clang_minor__ >= 6) - +#if defined(__EXCEPTIONS) && ABSL_HAVE_FEATURE(cxx_exceptions) +#define ABSL_HAVE_EXCEPTIONS 1 +#endif // defined(__EXCEPTIONS) && ABSL_HAVE_FEATURE(cxx_exceptions) // Handle remaining special cases and default to exceptions being supported. 
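A small sketch of consuming the exceptions feature macro defined above (the function and message are hypothetical):

#include <cstdlib>
#include <stdexcept>
#include "absl/base/config.h"

void FailOrAbort(bool ok) {
#ifdef ABSL_HAVE_EXCEPTIONS
  if (!ok) throw std::runtime_error("failure");
#else
  if (!ok) std::abort();  // exceptions are compiled out
#endif
}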
-#elif !(defined(__GNUC__) && (__GNUC__ < 5) && !defined(__EXCEPTIONS)) && \ - !(defined(__GNUC__) && (__GNUC__ >= 5) && !defined(__cpp_exceptions)) && \ +#elif !(defined(__GNUC__) && (__GNUC__ < 5) && !defined(__EXCEPTIONS)) && \ + !(ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(5, 0) && \ + !defined(__cpp_exceptions)) && \ !(defined(_MSC_VER) && !defined(_CPPUNWIND)) #define ABSL_HAVE_EXCEPTIONS 1 #endif @@ -357,10 +410,11 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || // POSIX.1-2001. #ifdef ABSL_HAVE_MMAP #error ABSL_HAVE_MMAP cannot be directly set -#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \ - defined(__ros__) || defined(__native_client__) || defined(__asmjs__) || \ - defined(__wasm__) || defined(__Fuchsia__) || defined(__sun) || \ - defined(__ASYLO__) +#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \ + defined(_AIX) || defined(__ros__) || defined(__native_client__) || \ + defined(__asmjs__) || defined(__wasm__) || defined(__Fuchsia__) || \ + defined(__sun) || defined(__ASYLO__) || defined(__myriad2__) || \ + defined(__HAIKU__) #define ABSL_HAVE_MMAP 1 #endif @@ -371,10 +425,19 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || #ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM #error ABSL_HAVE_PTHREAD_GETSCHEDPARAM cannot be directly set #elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \ - defined(__ros__) + defined(_AIX) || defined(__ros__) #define ABSL_HAVE_PTHREAD_GETSCHEDPARAM 1 #endif +// ABSL_HAVE_SCHED_GETCPU +// +// Checks whether sched_getcpu is available. +#ifdef ABSL_HAVE_SCHED_GETCPU +#error ABSL_HAVE_SCHED_GETCPU cannot be directly set +#elif defined(__linux__) +#define ABSL_HAVE_SCHED_GETCPU 1 +#endif + // ABSL_HAVE_SCHED_YIELD // // Checks whether the platform implements sched_yield(2) as defined in @@ -457,22 +520,41 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || #error "absl endian detection needs to be set up for your compiler" #endif -// macOS 10.13 and iOS 10.11 don't let you use , , or -// even though the headers exist and are publicly noted to work. See -// https://github.com/abseil/abseil-cpp/issues/207 and +// macOS < 10.13 and iOS < 11 don't let you use , , or +// even though the headers exist and are publicly noted to work, because the +// libc++ shared library shipped on the system doesn't have the requisite +// exported symbols. See https://github.com/abseil/abseil-cpp/issues/207 and // https://developer.apple.com/documentation/xcode_release_notes/xcode_10_release_notes +// // libc++ spells out the availability requirements in the file // llvm-project/libcxx/include/__config via the #define // _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS. -#if defined(__APPLE__) && defined(_LIBCPP_VERSION) && \ - ((defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \ - __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101400) || \ - (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && \ - __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 120000) || \ - (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && \ - __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 120000) || \ - (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && \ - __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 50000)) +// +// Unfortunately, Apple initially mis-stated the requirements as macOS < 10.14 +// and iOS < 12 in the libc++ headers. 
This was corrected by +// https://github.com/llvm/llvm-project/commit/7fb40e1569dd66292b647f4501b85517e9247953 +// which subsequently made it into the XCode 12.5 release. We need to match the +// old (incorrect) conditions when built with old XCode, but can use the +// corrected earlier versions with new XCode. +#if defined(__APPLE__) && defined(_LIBCPP_VERSION) && \ + ((_LIBCPP_VERSION >= 11000 && /* XCode 12.5 or later: */ \ + ((defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \ + __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101300) || \ + (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && \ + __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 110000) || \ + (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && \ + __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 40000) || \ + (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && \ + __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 110000))) || \ + (_LIBCPP_VERSION < 11000 && /* Pre-XCode 12.5: */ \ + ((defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \ + __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101400) || \ + (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && \ + __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 120000) || \ + (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && \ + __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 50000) || \ + (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && \ + __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 120000)))) #define ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE 1 #else #define ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE 0 @@ -486,7 +568,7 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || #endif #ifdef __has_include -#if __has_include() && __cplusplus >= 201703L && \ +#if __has_include() && defined(__cplusplus) && __cplusplus >= 201703L && \ !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE #define ABSL_HAVE_STD_ANY 1 #endif @@ -500,8 +582,8 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || #endif #ifdef __has_include -#if __has_include() && __cplusplus >= 201703L && \ - !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE +#if __has_include() && defined(__cplusplus) && \ + __cplusplus >= 201703L && !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE #define ABSL_HAVE_STD_OPTIONAL 1 #endif #endif @@ -514,8 +596,8 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || #endif #ifdef __has_include -#if __has_include() && __cplusplus >= 201703L && \ - !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE +#if __has_include() && defined(__cplusplus) && \ + __cplusplus >= 201703L && !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE #define ABSL_HAVE_STD_VARIANT 1 #endif #endif @@ -528,7 +610,8 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || #endif #ifdef __has_include -#if __has_include() && __cplusplus >= 201703L +#if __has_include() && defined(__cplusplus) && \ + __cplusplus >= 201703L #define ABSL_HAVE_STD_STRING_VIEW 1 #endif #endif @@ -540,8 +623,9 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || // not correctly set by MSVC, so we use `_MSVC_LANG` to check the language // version. // TODO(zhangxy): fix tests before enabling aliasing for `std::any`. 
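A sketch of the intended use of the availability macros above (the alias is hypothetical; the fallback assumes "absl/types/optional.h" is available in this tree):

#include "absl/base/config.h"

#ifdef ABSL_HAVE_STD_OPTIONAL
#include <optional>
template <typename T>
using Maybe = std::optional<T>;
#else
#include "absl/types/optional.h"
template <typename T>
using Maybe = absl::optional<T>;
#endif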
-#if defined(_MSC_VER) && _MSC_VER >= 1910 && \ - ((defined(_MSVC_LANG) && _MSVC_LANG > 201402) || __cplusplus > 201402) +#if defined(_MSC_VER) && _MSC_VER >= 1910 && \ + ((defined(_MSVC_LANG) && _MSVC_LANG > 201402) || \ + (defined(__cplusplus) && __cplusplus > 201402)) // #define ABSL_HAVE_STD_ANY 1 #define ABSL_HAVE_STD_OPTIONAL 1 #define ABSL_HAVE_STD_VARIANT 1 @@ -661,4 +745,75 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || #define ABSL_DLL #endif // defined(_MSC_VER) +// ABSL_HAVE_MEMORY_SANITIZER +// +// MemorySanitizer (MSan) is a detector of uninitialized reads. It consists of +// a compiler instrumentation module and a run-time library. +#ifdef ABSL_HAVE_MEMORY_SANITIZER +#error "ABSL_HAVE_MEMORY_SANITIZER cannot be directly set." +#elif !defined(__native_client__) && ABSL_HAVE_FEATURE(memory_sanitizer) +#define ABSL_HAVE_MEMORY_SANITIZER 1 +#endif + +// ABSL_HAVE_THREAD_SANITIZER +// +// ThreadSanitizer (TSan) is a fast data race detector. +#ifdef ABSL_HAVE_THREAD_SANITIZER +#error "ABSL_HAVE_THREAD_SANITIZER cannot be directly set." +#elif defined(__SANITIZE_THREAD__) +#define ABSL_HAVE_THREAD_SANITIZER 1 +#elif ABSL_HAVE_FEATURE(thread_sanitizer) +#define ABSL_HAVE_THREAD_SANITIZER 1 +#endif + +// ABSL_HAVE_ADDRESS_SANITIZER +// +// AddressSanitizer (ASan) is a fast memory error detector. +#ifdef ABSL_HAVE_ADDRESS_SANITIZER +#error "ABSL_HAVE_ADDRESS_SANITIZER cannot be directly set." +#elif defined(__SANITIZE_ADDRESS__) +#define ABSL_HAVE_ADDRESS_SANITIZER 1 +#elif ABSL_HAVE_FEATURE(address_sanitizer) +#define ABSL_HAVE_ADDRESS_SANITIZER 1 +#endif + +// ABSL_HAVE_HWADDRESS_SANITIZER +// +// Hardware-Assisted AddressSanitizer (or HWASAN) is even faster than asan +// memory error detector which can use CPU features like ARM TBI, Intel LAM or +// AMD UAI. +#ifdef ABSL_HAVE_HWADDRESS_SANITIZER +#error "ABSL_HAVE_HWADDRESS_SANITIZER cannot be directly set." +#elif defined(__SANITIZE_HWADDRESS__) +#define ABSL_HAVE_HWADDRESS_SANITIZER 1 +#elif ABSL_HAVE_FEATURE(hwaddress_sanitizer) +#define ABSL_HAVE_HWADDRESS_SANITIZER 1 +#endif + +// ABSL_HAVE_LEAK_SANITIZER +// +// LeakSanitizer (or lsan) is a detector of memory leaks. +#ifdef ABSL_HAVE_LEAK_SANITIZER +#error "ABSL_HAVE_LEAK_SANITIZER cannot be directly set." +#elif ABSL_HAVE_FEATURE(leak_sanitizer) +#define ABSL_HAVE_LEAK_SANITIZER 1 +#endif + +// ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION +// +// Class template argument deduction is a language feature added in C++17. +#ifdef ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION +#error "ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION cannot be directly set." +#elif defined(__cpp_deduction_guides) +#define ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION 1 +#endif + +// `ABSL_INTERNAL_HAS_RTTI` determines whether abseil is being compiled with +// RTTI support. +#ifdef ABSL_INTERNAL_HAS_RTTI +#error ABSL_INTERNAL_HAS_RTTI cannot be directly set +#elif !defined(__GNUC__) || defined(__GXX_RTTI) +#define ABSL_INTERNAL_HAS_RTTI 1 +#endif // !defined(__GNUC__) || defined(__GXX_RTTI) + #endif // ABSL_BASE_CONFIG_H_ diff --git a/TMessagesProj/jni/voip/webrtc/absl/base/dynamic_annotations.cc b/TMessagesProj/jni/voip/webrtc/absl/base/dynamic_annotations.cc deleted file mode 100644 index 141109375..000000000 --- a/TMessagesProj/jni/voip/webrtc/absl/base/dynamic_annotations.cc +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright 2017 The Abseil Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include -#include - -#include "absl/base/dynamic_annotations.h" - -#ifndef __has_feature -#define __has_feature(x) 0 -#endif - -/* Compiler-based ThreadSanitizer defines - ABSL_DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL = 1 - and provides its own definitions of the functions. */ - -#ifndef ABSL_DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL -# define ABSL_DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL 0 -#endif - -/* Each function is empty and called (via a macro) only in debug mode. - The arguments are captured by dynamic tools at runtime. */ - -#if ABSL_DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL == 0 && !defined(__native_client__) - -#if __has_feature(memory_sanitizer) -#include -#endif - -#ifdef __cplusplus -extern "C" { -#endif - -void AbslAnnotateRWLockCreate(const char *, int, - const volatile void *){} -void AbslAnnotateRWLockDestroy(const char *, int, - const volatile void *){} -void AbslAnnotateRWLockAcquired(const char *, int, - const volatile void *, long){} -void AbslAnnotateRWLockReleased(const char *, int, - const volatile void *, long){} -void AbslAnnotateBenignRace(const char *, int, - const volatile void *, - const char *){} -void AbslAnnotateBenignRaceSized(const char *, int, - const volatile void *, - size_t, - const char *) {} -void AbslAnnotateThreadName(const char *, int, - const char *){} -void AbslAnnotateIgnoreReadsBegin(const char *, int){} -void AbslAnnotateIgnoreReadsEnd(const char *, int){} -void AbslAnnotateIgnoreWritesBegin(const char *, int){} -void AbslAnnotateIgnoreWritesEnd(const char *, int){} -void AbslAnnotateEnableRaceDetection(const char *, int, int){} -void AbslAnnotateMemoryIsInitialized(const char *, int, - const volatile void *mem, size_t size) { -#if __has_feature(memory_sanitizer) - __msan_unpoison(mem, size); -#else - (void)mem; - (void)size; -#endif -} - -void AbslAnnotateMemoryIsUninitialized(const char *, int, - const volatile void *mem, size_t size) { -#if __has_feature(memory_sanitizer) - __msan_allocated_memory(mem, size); -#else - (void)mem; - (void)size; -#endif -} - -static int AbslGetRunningOnValgrind(void) { -#ifdef RUNNING_ON_VALGRIND - if (RUNNING_ON_VALGRIND) return 1; -#endif - char *running_on_valgrind_str = getenv("RUNNING_ON_VALGRIND"); - if (running_on_valgrind_str) { - return strcmp(running_on_valgrind_str, "0") != 0; - } - return 0; -} - -/* See the comments in dynamic_annotations.h */ -int AbslRunningOnValgrind(void) { - static volatile int running_on_valgrind = -1; - int local_running_on_valgrind = running_on_valgrind; - /* C doesn't have thread-safe initialization of statics, and we - don't want to depend on pthread_once here, so hack it. */ - ABSL_ANNOTATE_BENIGN_RACE(&running_on_valgrind, "safe hack"); - if (local_running_on_valgrind == -1) - running_on_valgrind = local_running_on_valgrind = AbslGetRunningOnValgrind(); - return local_running_on_valgrind; -} - -/* See the comments in dynamic_annotations.h */ -double AbslValgrindSlowdown(void) { - /* Same initialization hack as in AbslRunningOnValgrind(). 
*/ - static volatile double slowdown = 0.0; - double local_slowdown = slowdown; - ABSL_ANNOTATE_BENIGN_RACE(&slowdown, "safe hack"); - if (AbslRunningOnValgrind() == 0) { - return 1.0; - } - if (local_slowdown == 0.0) { - char *env = getenv("VALGRIND_SLOWDOWN"); - slowdown = local_slowdown = env ? atof(env) : 50.0; - } - return local_slowdown; -} - -#ifdef __cplusplus -} // extern "C" -#endif -#endif /* ABSL_DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL == 0 */ diff --git a/TMessagesProj/jni/voip/webrtc/absl/base/dynamic_annotations.h b/TMessagesProj/jni/voip/webrtc/absl/base/dynamic_annotations.h index c156e0639..1ebf1d124 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/base/dynamic_annotations.h +++ b/TMessagesProj/jni/voip/webrtc/absl/base/dynamic_annotations.h @@ -1,389 +1,468 @@ -/* - * Copyright 2017 The Abseil Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/* This file defines dynamic annotations for use with dynamic analysis - tool such as valgrind, PIN, etc. +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. - Dynamic annotation is a source code annotation that affects - the generated code (that is, the annotation is not a comment). - Each such annotation is attached to a particular - instruction and/or to a particular object (address) in the program. - - The annotations that should be used by users are macros in all upper-case - (e.g., ABSL_ANNOTATE_THREAD_NAME). - - Actual implementation of these macros may differ depending on the - dynamic analysis tool being used. - - This file supports the following configurations: - - Dynamic Annotations enabled (with static thread-safety warnings disabled). - In this case, macros expand to functions implemented by Thread Sanitizer, - when building with TSan. When not provided an external implementation, - dynamic_annotations.cc provides no-op implementations. - - - Static Clang thread-safety warnings enabled. - When building with a Clang compiler that supports thread-safety warnings, - a subset of annotations can be statically-checked at compile-time. We - expand these macros to static-inline functions that can be analyzed for - thread-safety, but afterwards elided when building the final binary. - - - All annotations are disabled. - If neither Dynamic Annotations nor Clang thread-safety warnings are - enabled, then all annotation-macros expand to empty. */ +// This file defines dynamic annotations for use with dynamic analysis tool +// such as valgrind, PIN, etc. 
+// +// Dynamic annotation is a source code annotation that affects the generated +// code (that is, the annotation is not a comment). Each such annotation is +// attached to a particular instruction and/or to a particular object (address) +// in the program. +// +// The annotations that should be used by users are macros in all upper-case +// (e.g., ABSL_ANNOTATE_THREAD_NAME). +// +// Actual implementation of these macros may differ depending on the dynamic +// analysis tool being used. +// +// This file supports the following configurations: +// - Dynamic Annotations enabled (with static thread-safety warnings disabled). +// In this case, macros expand to functions implemented by Thread Sanitizer, +// when building with TSan. When not provided an external implementation, +// dynamic_annotations.cc provides no-op implementations. +// +// - Static Clang thread-safety warnings enabled. +// When building with a Clang compiler that supports thread-safety warnings, +// a subset of annotations can be statically-checked at compile-time. We +// expand these macros to static-inline functions that can be analyzed for +// thread-safety, but afterwards elided when building the final binary. +// +// - All annotations are disabled. +// If neither Dynamic Annotations nor Clang thread-safety warnings are +// enabled, then all annotation-macros expand to empty. #ifndef ABSL_BASE_DYNAMIC_ANNOTATIONS_H_ #define ABSL_BASE_DYNAMIC_ANNOTATIONS_H_ -#ifndef ABSL_DYNAMIC_ANNOTATIONS_ENABLED -# define ABSL_DYNAMIC_ANNOTATIONS_ENABLED 0 -#endif - -#if ABSL_DYNAMIC_ANNOTATIONS_ENABLED != 0 - - /* ------------------------------------------------------------- - Annotations that suppress errors. It is usually better to express the - program's synchronization using the other annotations, but these can - be used when all else fails. */ - - /* Report that we may have a benign race at "pointer", with size - "sizeof(*(pointer))". "pointer" must be a non-void* pointer. Insert at the - point where "pointer" has been allocated, preferably close to the point - where the race happens. See also ABSL_ANNOTATE_BENIGN_RACE_STATIC. */ - #define ABSL_ANNOTATE_BENIGN_RACE(pointer, description) \ - AbslAnnotateBenignRaceSized(__FILE__, __LINE__, pointer, \ - sizeof(*(pointer)), description) - - /* Same as ABSL_ANNOTATE_BENIGN_RACE(address, description), but applies to - the memory range [address, address+size). */ - #define ABSL_ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \ - AbslAnnotateBenignRaceSized(__FILE__, __LINE__, address, size, description) - - /* Enable (enable!=0) or disable (enable==0) race detection for all threads. - This annotation could be useful if you want to skip expensive race analysis - during some period of program execution, e.g. during initialization. */ - #define ABSL_ANNOTATE_ENABLE_RACE_DETECTION(enable) \ - AbslAnnotateEnableRaceDetection(__FILE__, __LINE__, enable) - - /* ------------------------------------------------------------- - Annotations useful for debugging. */ - - /* Report the current thread name to a race detector. */ - #define ABSL_ANNOTATE_THREAD_NAME(name) \ - AbslAnnotateThreadName(__FILE__, __LINE__, name) - - /* ------------------------------------------------------------- - Annotations useful when implementing locks. They are not - normally needed by modules that merely use locks. - The "lock" argument is a pointer to the lock object. */ - - /* Report that a lock has been created at address "lock". 
*/ - #define ABSL_ANNOTATE_RWLOCK_CREATE(lock) \ - AbslAnnotateRWLockCreate(__FILE__, __LINE__, lock) - - /* Report that a linker initialized lock has been created at address "lock". - */ -#ifdef THREAD_SANITIZER - #define ABSL_ANNOTATE_RWLOCK_CREATE_STATIC(lock) \ - AbslAnnotateRWLockCreateStatic(__FILE__, __LINE__, lock) -#else - #define ABSL_ANNOTATE_RWLOCK_CREATE_STATIC(lock) ABSL_ANNOTATE_RWLOCK_CREATE(lock) -#endif - - /* Report that the lock at address "lock" is about to be destroyed. */ - #define ABSL_ANNOTATE_RWLOCK_DESTROY(lock) \ - AbslAnnotateRWLockDestroy(__FILE__, __LINE__, lock) - - /* Report that the lock at address "lock" has been acquired. - is_w=1 for writer lock, is_w=0 for reader lock. */ - #define ABSL_ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \ - AbslAnnotateRWLockAcquired(__FILE__, __LINE__, lock, is_w) - - /* Report that the lock at address "lock" is about to be released. */ - #define ABSL_ANNOTATE_RWLOCK_RELEASED(lock, is_w) \ - AbslAnnotateRWLockReleased(__FILE__, __LINE__, lock, is_w) - -#else /* ABSL_DYNAMIC_ANNOTATIONS_ENABLED == 0 */ - - #define ABSL_ANNOTATE_RWLOCK_CREATE(lock) /* empty */ - #define ABSL_ANNOTATE_RWLOCK_CREATE_STATIC(lock) /* empty */ - #define ABSL_ANNOTATE_RWLOCK_DESTROY(lock) /* empty */ - #define ABSL_ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) /* empty */ - #define ABSL_ANNOTATE_RWLOCK_RELEASED(lock, is_w) /* empty */ - #define ABSL_ANNOTATE_BENIGN_RACE(address, description) /* empty */ - #define ABSL_ANNOTATE_BENIGN_RACE_SIZED(address, size, description) /* empty */ - #define ABSL_ANNOTATE_THREAD_NAME(name) /* empty */ - #define ABSL_ANNOTATE_ENABLE_RACE_DETECTION(enable) /* empty */ - -#endif /* ABSL_DYNAMIC_ANNOTATIONS_ENABLED */ - -/* These annotations are also made available to LLVM's Memory Sanitizer */ -#if ABSL_DYNAMIC_ANNOTATIONS_ENABLED == 1 || defined(MEMORY_SANITIZER) - #define ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \ - AbslAnnotateMemoryIsInitialized(__FILE__, __LINE__, address, size) - - #define ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \ - AbslAnnotateMemoryIsUninitialized(__FILE__, __LINE__, address, size) -#else - #define ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) /* empty */ - #define ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) /* empty */ -#endif /* ABSL_DYNAMIC_ANNOTATIONS_ENABLED || MEMORY_SANITIZER */ - -/* TODO(delesley) -- Replace __CLANG_SUPPORT_DYN_ANNOTATION__ with the - appropriate feature ID. */ -#if defined(__clang__) && (!defined(SWIG)) \ - && defined(__CLANG_SUPPORT_DYN_ANNOTATION__) - - #if ABSL_DYNAMIC_ANNOTATIONS_ENABLED == 0 - #define ABSL_ANNOTALYSIS_ENABLED - #endif - - /* When running in opt-mode, GCC will issue a warning, if these attributes are - compiled. Only include them when compiling using Clang. */ - #define ABSL_ATTRIBUTE_IGNORE_READS_BEGIN \ - __attribute((exclusive_lock_function("*"))) - #define ABSL_ATTRIBUTE_IGNORE_READS_END \ - __attribute((unlock_function("*"))) -#else - #define ABSL_ATTRIBUTE_IGNORE_READS_BEGIN /* empty */ - #define ABSL_ATTRIBUTE_IGNORE_READS_END /* empty */ -#endif /* defined(__clang__) && ... */ - -#if (ABSL_DYNAMIC_ANNOTATIONS_ENABLED != 0) || defined(ABSL_ANNOTALYSIS_ENABLED) - #define ABSL_ANNOTATIONS_ENABLED -#endif - -#if (ABSL_DYNAMIC_ANNOTATIONS_ENABLED != 0) - - /* Request the analysis tool to ignore all reads in the current thread - until ABSL_ANNOTATE_IGNORE_READS_END is called. - Useful to ignore intentional racey reads, while still checking - other reads and all writes. - See also ABSL_ANNOTATE_UNPROTECTED_READ. 
*/ - #define ABSL_ANNOTATE_IGNORE_READS_BEGIN() \ - AbslAnnotateIgnoreReadsBegin(__FILE__, __LINE__) - - /* Stop ignoring reads. */ - #define ABSL_ANNOTATE_IGNORE_READS_END() \ - AbslAnnotateIgnoreReadsEnd(__FILE__, __LINE__) - - /* Similar to ABSL_ANNOTATE_IGNORE_READS_BEGIN, but ignore writes instead. */ - #define ABSL_ANNOTATE_IGNORE_WRITES_BEGIN() \ - AbslAnnotateIgnoreWritesBegin(__FILE__, __LINE__) - - /* Stop ignoring writes. */ - #define ABSL_ANNOTATE_IGNORE_WRITES_END() \ - AbslAnnotateIgnoreWritesEnd(__FILE__, __LINE__) - -/* Clang provides limited support for static thread-safety analysis - through a feature called Annotalysis. We configure macro-definitions - according to whether Annotalysis support is available. */ -#elif defined(ABSL_ANNOTALYSIS_ENABLED) - - #define ABSL_ANNOTATE_IGNORE_READS_BEGIN() \ - AbslStaticAnnotateIgnoreReadsBegin(__FILE__, __LINE__) - - #define ABSL_ANNOTATE_IGNORE_READS_END() \ - AbslStaticAnnotateIgnoreReadsEnd(__FILE__, __LINE__) - - #define ABSL_ANNOTATE_IGNORE_WRITES_BEGIN() \ - AbslStaticAnnotateIgnoreWritesBegin(__FILE__, __LINE__) - - #define ABSL_ANNOTATE_IGNORE_WRITES_END() \ - AbslStaticAnnotateIgnoreWritesEnd(__FILE__, __LINE__) - -#else - #define ABSL_ANNOTATE_IGNORE_READS_BEGIN() /* empty */ - #define ABSL_ANNOTATE_IGNORE_READS_END() /* empty */ - #define ABSL_ANNOTATE_IGNORE_WRITES_BEGIN() /* empty */ - #define ABSL_ANNOTATE_IGNORE_WRITES_END() /* empty */ -#endif - -/* Implement the ANNOTATE_IGNORE_READS_AND_WRITES_* annotations using the more - primitive annotations defined above. */ -#if defined(ABSL_ANNOTATIONS_ENABLED) - - /* Start ignoring all memory accesses (both reads and writes). */ - #define ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \ - do { \ - ABSL_ANNOTATE_IGNORE_READS_BEGIN(); \ - ABSL_ANNOTATE_IGNORE_WRITES_BEGIN(); \ - }while (0) - - /* Stop ignoring both reads and writes. */ - #define ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END() \ - do { \ - ABSL_ANNOTATE_IGNORE_WRITES_END(); \ - ABSL_ANNOTATE_IGNORE_READS_END(); \ - }while (0) - -#else - #define ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() /* empty */ - #define ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END() /* empty */ -#endif - -/* Use the macros above rather than using these functions directly. 
*/ #include + +#include "absl/base/attributes.h" +#include "absl/base/config.h" #ifdef __cplusplus -extern "C" { -#endif -void AbslAnnotateRWLockCreate(const char *file, int line, - const volatile void *lock); -void AbslAnnotateRWLockCreateStatic(const char *file, int line, - const volatile void *lock); -void AbslAnnotateRWLockDestroy(const char *file, int line, - const volatile void *lock); -void AbslAnnotateRWLockAcquired(const char *file, int line, - const volatile void *lock, long is_w); /* NOLINT */ -void AbslAnnotateRWLockReleased(const char *file, int line, - const volatile void *lock, long is_w); /* NOLINT */ -void AbslAnnotateBenignRace(const char *file, int line, - const volatile void *address, - const char *description); -void AbslAnnotateBenignRaceSized(const char *file, int line, - const volatile void *address, - size_t size, - const char *description); -void AbslAnnotateThreadName(const char *file, int line, - const char *name); -void AbslAnnotateEnableRaceDetection(const char *file, int line, int enable); -void AbslAnnotateMemoryIsInitialized(const char *file, int line, - const volatile void *mem, size_t size); -void AbslAnnotateMemoryIsUninitialized(const char *file, int line, - const volatile void *mem, size_t size); - -/* Annotations expand to these functions, when Dynamic Annotations are enabled. - These functions are either implemented as no-op calls, if no Sanitizer is - attached, or provided with externally-linked implementations by a library - like ThreadSanitizer. */ -void AbslAnnotateIgnoreReadsBegin(const char *file, int line) - ABSL_ATTRIBUTE_IGNORE_READS_BEGIN; -void AbslAnnotateIgnoreReadsEnd(const char *file, int line) - ABSL_ATTRIBUTE_IGNORE_READS_END; -void AbslAnnotateIgnoreWritesBegin(const char *file, int line); -void AbslAnnotateIgnoreWritesEnd(const char *file, int line); - -#if defined(ABSL_ANNOTALYSIS_ENABLED) -/* When Annotalysis is enabled without Dynamic Annotations, the use of - static-inline functions allows the annotations to be read at compile-time, - while still letting the compiler elide the functions from the final build. - - TODO(delesley) -- The exclusive lock here ignores writes as well, but - allows IGNORE_READS_AND_WRITES to work properly. */ -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-function" -static inline void AbslStaticAnnotateIgnoreReadsBegin(const char *file, int line) - ABSL_ATTRIBUTE_IGNORE_READS_BEGIN { (void)file; (void)line; } -static inline void AbslStaticAnnotateIgnoreReadsEnd(const char *file, int line) - ABSL_ATTRIBUTE_IGNORE_READS_END { (void)file; (void)line; } -static inline void AbslStaticAnnotateIgnoreWritesBegin( - const char *file, int line) { (void)file; (void)line; } -static inline void AbslStaticAnnotateIgnoreWritesEnd( - const char *file, int line) { (void)file; (void)line; } -#pragma GCC diagnostic pop +#include "absl/base/macros.h" #endif -/* Return non-zero value if running under valgrind. +// ------------------------------------------------------------------------- +// Decide which features are enabled. - If "valgrind.h" is included into dynamic_annotations.cc, - the regular valgrind mechanism will be used. - See http://valgrind.org/docs/manual/manual-core-adv.html about - RUNNING_ON_VALGRIND and other valgrind "client requests". 
- The file "valgrind.h" may be obtained by doing - svn co svn://svn.valgrind.org/valgrind/trunk/include +#ifdef ABSL_HAVE_THREAD_SANITIZER - If for some reason you can't use "valgrind.h" or want to fake valgrind, - there are two ways to make this function return non-zero: - - Use environment variable: export RUNNING_ON_VALGRIND=1 - - Make your tool intercept the function AbslRunningOnValgrind() and - change its return value. - */ -int AbslRunningOnValgrind(void); +#define ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED 1 +#define ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED 1 +#define ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED 1 +#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED 0 +#define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED 1 -/* AbslValgrindSlowdown returns: - * 1.0, if (AbslRunningOnValgrind() == 0) - * 50.0, if (AbslRunningOnValgrind() != 0 && getenv("VALGRIND_SLOWDOWN") == NULL) - * atof(getenv("VALGRIND_SLOWDOWN")) otherwise - This function can be used to scale timeout values: - EXAMPLE: - for (;;) { - DoExpensiveBackgroundTask(); - SleepForSeconds(5 * AbslValgrindSlowdown()); - } - */ -double AbslValgrindSlowdown(void); +#else + +#define ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED 0 +#define ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED 0 +#define ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED 0 + +// Clang provides limited support for static thread-safety analysis through a +// feature called Annotalysis. We configure macro-definitions according to +// whether Annotalysis support is available. When running in opt-mode, GCC +// will issue a warning, if these attributes are compiled. Only include them +// when compiling using Clang. + +#if defined(__clang__) +#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED 1 +#if !defined(SWIG) +#define ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED 1 +#endif +#else +#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED 0 +#endif + +// Read/write annotations are enabled in Annotalysis mode; disabled otherwise. +#define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED \ + ABSL_INTERNAL_ANNOTALYSIS_ENABLED + +#endif // ABSL_HAVE_THREAD_SANITIZER #ifdef __cplusplus -} +#define ABSL_INTERNAL_BEGIN_EXTERN_C extern "C" { +#define ABSL_INTERNAL_END_EXTERN_C } // extern "C" +#define ABSL_INTERNAL_GLOBAL_SCOPED(F) ::F +#define ABSL_INTERNAL_STATIC_INLINE inline +#else +#define ABSL_INTERNAL_BEGIN_EXTERN_C // empty +#define ABSL_INTERNAL_END_EXTERN_C // empty +#define ABSL_INTERNAL_GLOBAL_SCOPED(F) F +#define ABSL_INTERNAL_STATIC_INLINE static inline #endif -/* ABSL_ANNOTATE_UNPROTECTED_READ is the preferred way to annotate racey reads. +// ------------------------------------------------------------------------- +// Define race annotations. + +#if ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 1 +// Some of the symbols used in this section (e.g. AnnotateBenignRaceSized) are +// defined by the compiler-based santizer implementation, not by the Abseil +// library. Therefore they do not use ABSL_INTERNAL_C_SYMBOL. + +// ------------------------------------------------------------- +// Annotations that suppress errors. It is usually better to express the +// program's synchronization using the other annotations, but these can be used +// when all else fails. + +// Report that we may have a benign race at `pointer`, with size +// "sizeof(*(pointer))". `pointer` must be a non-void* pointer. Insert at the +// point where `pointer` has been allocated, preferably close to the point +// where the race happens. See also ABSL_ANNOTATE_BENIGN_RACE_STATIC. 
+#define ABSL_ANNOTATE_BENIGN_RACE(pointer, description) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \ + (__FILE__, __LINE__, pointer, sizeof(*(pointer)), description) + +// Same as ABSL_ANNOTATE_BENIGN_RACE(`address`, `description`), but applies to +// the memory range [`address`, `address`+`size`). +#define ABSL_ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \ + (__FILE__, __LINE__, address, size, description) + +// Enable (`enable`!=0) or disable (`enable`==0) race detection for all threads. +// This annotation could be useful if you want to skip expensive race analysis +// during some period of program execution, e.g. during initialization. +#define ABSL_ANNOTATE_ENABLE_RACE_DETECTION(enable) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateEnableRaceDetection) \ + (__FILE__, __LINE__, enable) + +// ------------------------------------------------------------- +// Annotations useful for debugging. + +// Report the current thread `name` to a race detector. +#define ABSL_ANNOTATE_THREAD_NAME(name) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateThreadName)(__FILE__, __LINE__, name) + +// ------------------------------------------------------------- +// Annotations useful when implementing locks. They are not normally needed by +// modules that merely use locks. The `lock` argument is a pointer to the lock +// object. + +// Report that a lock has been created at address `lock`. +#define ABSL_ANNOTATE_RWLOCK_CREATE(lock) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreate)(__FILE__, __LINE__, lock) + +// Report that a linker initialized lock has been created at address `lock`. +#ifdef ABSL_HAVE_THREAD_SANITIZER +#define ABSL_ANNOTATE_RWLOCK_CREATE_STATIC(lock) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreateStatic) \ + (__FILE__, __LINE__, lock) +#else +#define ABSL_ANNOTATE_RWLOCK_CREATE_STATIC(lock) \ + ABSL_ANNOTATE_RWLOCK_CREATE(lock) +#endif + +// Report that the lock at address `lock` is about to be destroyed. +#define ABSL_ANNOTATE_RWLOCK_DESTROY(lock) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockDestroy)(__FILE__, __LINE__, lock) + +// Report that the lock at address `lock` has been acquired. +// `is_w`=1 for writer lock, `is_w`=0 for reader lock. +#define ABSL_ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockAcquired) \ + (__FILE__, __LINE__, lock, is_w) + +// Report that the lock at address `lock` is about to be released. +// `is_w`=1 for writer lock, `is_w`=0 for reader lock. +#define ABSL_ANNOTATE_RWLOCK_RELEASED(lock, is_w) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockReleased) \ + (__FILE__, __LINE__, lock, is_w) + +// Apply ABSL_ANNOTATE_BENIGN_RACE_SIZED to a static variable `static_var`. +#define ABSL_ANNOTATE_BENIGN_RACE_STATIC(static_var, description) \ + namespace { \ + class static_var##_annotator { \ + public: \ + static_var##_annotator() { \ + ABSL_ANNOTATE_BENIGN_RACE_SIZED(&static_var, sizeof(static_var), \ + #static_var ": " description); \ + } \ + }; \ + static static_var##_annotator the##static_var##_annotator; \ + } // namespace + +// Function prototypes of annotations provided by the compiler-based sanitizer +// implementation. 
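// An illustrative sketch, not taken from the Abseil sources or this patch:
// typical use of the race annotations defined above, naming a worker thread
// for sanitizer reports and marking an intentionally unsynchronized counter
// as a benign race. The surrounding struct, field, and function names are
// hypothetical.
#include "absl/base/dynamic_annotations.h"

struct Stats {
  long approx_hits = 0;  // Updated without synchronization on purpose.
  Stats() {
    ABSL_ANNOTATE_BENIGN_RACE(&approx_hits,
                              "best-effort counter, races are acceptable");
  }
};

void WorkerLoop(Stats* stats) {
  ABSL_ANNOTATE_THREAD_NAME("myapp-worker");  // Shows up in race reports.
  for (int i = 0; i < 1000; ++i) {
    stats->approx_hits++;  // Racy by design; suppressed by the annotation above.
  }
}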
+ABSL_INTERNAL_BEGIN_EXTERN_C +void AnnotateRWLockCreate(const char* file, int line, + const volatile void* lock); +void AnnotateRWLockCreateStatic(const char* file, int line, + const volatile void* lock); +void AnnotateRWLockDestroy(const char* file, int line, + const volatile void* lock); +void AnnotateRWLockAcquired(const char* file, int line, + const volatile void* lock, long is_w); // NOLINT +void AnnotateRWLockReleased(const char* file, int line, + const volatile void* lock, long is_w); // NOLINT +void AnnotateBenignRace(const char* file, int line, + const volatile void* address, const char* description); +void AnnotateBenignRaceSized(const char* file, int line, + const volatile void* address, size_t size, + const char* description); +void AnnotateThreadName(const char* file, int line, const char* name); +void AnnotateEnableRaceDetection(const char* file, int line, int enable); +ABSL_INTERNAL_END_EXTERN_C + +#else // ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 0 + +#define ABSL_ANNOTATE_RWLOCK_CREATE(lock) // empty +#define ABSL_ANNOTATE_RWLOCK_CREATE_STATIC(lock) // empty +#define ABSL_ANNOTATE_RWLOCK_DESTROY(lock) // empty +#define ABSL_ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) // empty +#define ABSL_ANNOTATE_RWLOCK_RELEASED(lock, is_w) // empty +#define ABSL_ANNOTATE_BENIGN_RACE(address, description) // empty +#define ABSL_ANNOTATE_BENIGN_RACE_SIZED(address, size, description) // empty +#define ABSL_ANNOTATE_THREAD_NAME(name) // empty +#define ABSL_ANNOTATE_ENABLE_RACE_DETECTION(enable) // empty +#define ABSL_ANNOTATE_BENIGN_RACE_STATIC(static_var, description) // empty + +#endif // ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED + +// ------------------------------------------------------------------------- +// Define memory annotations. + +#ifdef ABSL_HAVE_MEMORY_SANITIZER + +#include + +#define ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \ + __msan_unpoison(address, size) + +#define ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \ + __msan_allocated_memory(address, size) + +#else // !defined(ABSL_HAVE_MEMORY_SANITIZER) + +// TODO(rogeeff): remove this branch +#ifdef ABSL_HAVE_THREAD_SANITIZER +#define ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \ + do { \ + (void)(address); \ + (void)(size); \ + } while (0) +#define ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \ + do { \ + (void)(address); \ + (void)(size); \ + } while (0) +#else + +#define ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) // empty +#define ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) // empty + +#endif + +#endif // ABSL_HAVE_MEMORY_SANITIZER + +// ------------------------------------------------------------------------- +// Define IGNORE_READS_BEGIN/_END attributes. + +#if defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED) + +#define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE \ + __attribute((exclusive_lock_function("*"))) +#define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE \ + __attribute((unlock_function("*"))) + +#else // !defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED) + +#define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE // empty +#define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE // empty + +#endif // defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED) + +// ------------------------------------------------------------------------- +// Define IGNORE_READS_BEGIN/_END annotations. + +#if ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED == 1 +// Some of the symbols used in this section (e.g. 
AnnotateIgnoreReadsBegin) are +// defined by the compiler-based implementation, not by the Abseil +// library. Therefore they do not use ABSL_INTERNAL_C_SYMBOL. + +// Request the analysis tool to ignore all reads in the current thread until +// ABSL_ANNOTATE_IGNORE_READS_END is called. Useful to ignore intentional racey +// reads, while still checking other reads and all writes. +// See also ABSL_ANNOTATE_UNPROTECTED_READ. +#define ABSL_ANNOTATE_IGNORE_READS_BEGIN() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsBegin) \ + (__FILE__, __LINE__) + +// Stop ignoring reads. +#define ABSL_ANNOTATE_IGNORE_READS_END() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsEnd) \ + (__FILE__, __LINE__) + +// Function prototypes of annotations provided by the compiler-based sanitizer +// implementation. +ABSL_INTERNAL_BEGIN_EXTERN_C +void AnnotateIgnoreReadsBegin(const char* file, int line) + ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE; +void AnnotateIgnoreReadsEnd(const char* file, + int line) ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE; +ABSL_INTERNAL_END_EXTERN_C + +#elif defined(ABSL_INTERNAL_ANNOTALYSIS_ENABLED) + +// When Annotalysis is enabled without Dynamic Annotations, the use of +// static-inline functions allows the annotations to be read at compile-time, +// while still letting the compiler elide the functions from the final build. +// +// TODO(delesley) -- The exclusive lock here ignores writes as well, but +// allows IGNORE_READS_AND_WRITES to work properly. + +#define ABSL_ANNOTATE_IGNORE_READS_BEGIN() \ + ABSL_INTERNAL_GLOBAL_SCOPED( \ + ABSL_INTERNAL_C_SYMBOL(AbslInternalAnnotateIgnoreReadsBegin)) \ + () + +#define ABSL_ANNOTATE_IGNORE_READS_END() \ + ABSL_INTERNAL_GLOBAL_SCOPED( \ + ABSL_INTERNAL_C_SYMBOL(AbslInternalAnnotateIgnoreReadsEnd)) \ + () + +ABSL_INTERNAL_STATIC_INLINE void ABSL_INTERNAL_C_SYMBOL( + AbslInternalAnnotateIgnoreReadsBegin)() + ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE {} + +ABSL_INTERNAL_STATIC_INLINE void ABSL_INTERNAL_C_SYMBOL( + AbslInternalAnnotateIgnoreReadsEnd)() + ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE {} + +#else + +#define ABSL_ANNOTATE_IGNORE_READS_BEGIN() // empty +#define ABSL_ANNOTATE_IGNORE_READS_END() // empty + +#endif + +// ------------------------------------------------------------------------- +// Define IGNORE_WRITES_BEGIN/_END annotations. + +#if ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED == 1 + +// Similar to ABSL_ANNOTATE_IGNORE_READS_BEGIN, but ignore writes instead. +#define ABSL_ANNOTATE_IGNORE_WRITES_BEGIN() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesBegin)(__FILE__, __LINE__) + +// Stop ignoring writes. +#define ABSL_ANNOTATE_IGNORE_WRITES_END() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesEnd)(__FILE__, __LINE__) + +// Function prototypes of annotations provided by the compiler-based sanitizer +// implementation. +ABSL_INTERNAL_BEGIN_EXTERN_C +void AnnotateIgnoreWritesBegin(const char* file, int line); +void AnnotateIgnoreWritesEnd(const char* file, int line); +ABSL_INTERNAL_END_EXTERN_C + +#else + +#define ABSL_ANNOTATE_IGNORE_WRITES_BEGIN() // empty +#define ABSL_ANNOTATE_IGNORE_WRITES_END() // empty + +#endif + +// ------------------------------------------------------------------------- +// Define the ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_* annotations using the more +// primitive annotations defined above. +// +// Instead of doing +// ABSL_ANNOTATE_IGNORE_READS_BEGIN(); +// ... = x; +// ABSL_ANNOTATE_IGNORE_READS_END(); +// one can use +// ... 
= ABSL_ANNOTATE_UNPROTECTED_READ(x); + +#if defined(ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED) + +// Start ignoring all memory accesses (both reads and writes). +#define ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \ + do { \ + ABSL_ANNOTATE_IGNORE_READS_BEGIN(); \ + ABSL_ANNOTATE_IGNORE_WRITES_BEGIN(); \ + } while (0) + +// Stop ignoring both reads and writes. +#define ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END() \ + do { \ + ABSL_ANNOTATE_IGNORE_WRITES_END(); \ + ABSL_ANNOTATE_IGNORE_READS_END(); \ + } while (0) + +#ifdef __cplusplus +// ABSL_ANNOTATE_UNPROTECTED_READ is the preferred way to annotate racey reads. +#define ABSL_ANNOTATE_UNPROTECTED_READ(x) \ + absl::base_internal::AnnotateUnprotectedRead(x) + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { - Instead of doing - ABSL_ANNOTATE_IGNORE_READS_BEGIN(); - ... = x; - ABSL_ANNOTATE_IGNORE_READS_END(); - one can use - ... = ABSL_ANNOTATE_UNPROTECTED_READ(x); */ -#if defined(__cplusplus) && defined(ABSL_ANNOTATIONS_ENABLED) template -inline T ABSL_ANNOTATE_UNPROTECTED_READ(const volatile T &x) { /* NOLINT */ +inline T AnnotateUnprotectedRead(const volatile T& x) { // NOLINT ABSL_ANNOTATE_IGNORE_READS_BEGIN(); T res = x; ABSL_ANNOTATE_IGNORE_READS_END(); return res; - } -#else - #define ABSL_ANNOTATE_UNPROTECTED_READ(x) (x) +} + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl #endif -#if ABSL_DYNAMIC_ANNOTATIONS_ENABLED != 0 && defined(__cplusplus) - /* Apply ABSL_ANNOTATE_BENIGN_RACE_SIZED to a static variable. */ - #define ABSL_ANNOTATE_BENIGN_RACE_STATIC(static_var, description) \ - namespace { \ - class static_var ## _annotator { \ - public: \ - static_var ## _annotator() { \ - ABSL_ANNOTATE_BENIGN_RACE_SIZED(&static_var, \ - sizeof(static_var), \ - # static_var ": " description); \ - } \ - }; \ - static static_var ## _annotator the ## static_var ## _annotator;\ - } // namespace -#else /* ABSL_DYNAMIC_ANNOTATIONS_ENABLED == 0 */ - #define ABSL_ANNOTATE_BENIGN_RACE_STATIC(static_var, description) /* empty */ -#endif /* ABSL_DYNAMIC_ANNOTATIONS_ENABLED */ +#else -#ifdef ADDRESS_SANITIZER -/* Describe the current state of a contiguous container such as e.g. - * std::vector or std::string. For more details see - * sanitizer/common_interface_defs.h, which is provided by the compiler. */ +#define ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() // empty +#define ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END() // empty +#define ABSL_ANNOTATE_UNPROTECTED_READ(x) (x) + +#endif + +// ------------------------------------------------------------------------- +// Address sanitizer annotations + +#ifdef ABSL_HAVE_ADDRESS_SANITIZER +// Describe the current state of a contiguous container such as e.g. +// std::vector or std::string. For more details see +// sanitizer/common_interface_defs.h, which is provided by the compiler. #include + #define ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) \ __sanitizer_annotate_contiguous_container(beg, end, old_mid, new_mid) -#define ABSL_ADDRESS_SANITIZER_REDZONE(name) \ - struct { char x[8] __attribute__ ((aligned (8))); } name +#define ABSL_ADDRESS_SANITIZER_REDZONE(name) \ + struct { \ + alignas(8) char x[8]; \ + } name + #else -#define ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) + +#define ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) // empty #define ABSL_ADDRESS_SANITIZER_REDZONE(name) static_assert(true, "") -#endif // ADDRESS_SANITIZER -/* Undefine the macros intended only in this file. 
*/ -#undef ABSL_ANNOTALYSIS_ENABLED -#undef ABSL_ANNOTATIONS_ENABLED -#undef ABSL_ATTRIBUTE_IGNORE_READS_BEGIN -#undef ABSL_ATTRIBUTE_IGNORE_READS_END +#endif // ABSL_HAVE_ADDRESS_SANITIZER -#endif /* ABSL_BASE_DYNAMIC_ANNOTATIONS_H_ */ +// ------------------------------------------------------------------------- +// Undefine the macros intended only for this file. + +#undef ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED +#undef ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED +#undef ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED +#undef ABSL_INTERNAL_ANNOTALYSIS_ENABLED +#undef ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED +#undef ABSL_INTERNAL_BEGIN_EXTERN_C +#undef ABSL_INTERNAL_END_EXTERN_C +#undef ABSL_INTERNAL_STATIC_INLINE + +#endif // ABSL_BASE_DYNAMIC_ANNOTATIONS_H_ diff --git a/TMessagesProj/jni/voip/webrtc/absl/base/internal/bits.h b/TMessagesProj/jni/voip/webrtc/absl/base/internal/bits.h deleted file mode 100644 index 14c51d8b3..000000000 --- a/TMessagesProj/jni/voip/webrtc/absl/base/internal/bits.h +++ /dev/null @@ -1,218 +0,0 @@ -// Copyright 2018 The Abseil Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#ifndef ABSL_BASE_INTERNAL_BITS_H_ -#define ABSL_BASE_INTERNAL_BITS_H_ - -// This file contains bitwise ops which are implementation details of various -// absl libraries. - -#include - -#include "absl/base/config.h" - -// Clang on Windows has __builtin_clzll; otherwise we need to use the -// windows intrinsic functions. -#if defined(_MSC_VER) && !defined(__clang__) -#include -#if defined(_M_X64) -#pragma intrinsic(_BitScanReverse64) -#pragma intrinsic(_BitScanForward64) -#endif -#pragma intrinsic(_BitScanReverse) -#pragma intrinsic(_BitScanForward) -#endif - -#include "absl/base/attributes.h" - -#if defined(_MSC_VER) && !defined(__clang__) -// We can achieve something similar to attribute((always_inline)) with MSVC by -// using the __forceinline keyword, however this is not perfect. MSVC is -// much less aggressive about inlining, and even with the __forceinline keyword. -#define ABSL_BASE_INTERNAL_FORCEINLINE __forceinline -#else -// Use default attribute inline. -#define ABSL_BASE_INTERNAL_FORCEINLINE inline ABSL_ATTRIBUTE_ALWAYS_INLINE -#endif - - -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace base_internal { - -ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros64Slow(uint64_t n) { - int zeroes = 60; - if (n >> 32) { - zeroes -= 32; - n >>= 32; - } - if (n >> 16) { - zeroes -= 16; - n >>= 16; - } - if (n >> 8) { - zeroes -= 8; - n >>= 8; - } - if (n >> 4) { - zeroes -= 4; - n >>= 4; - } - return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[n] + zeroes; -} - -ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros64(uint64_t n) { -#if defined(_MSC_VER) && !defined(__clang__) && defined(_M_X64) - // MSVC does not have __buitin_clzll. Use _BitScanReverse64. - unsigned long result = 0; // NOLINT(runtime/int) - if (_BitScanReverse64(&result, n)) { - return 63 - result; - } - return 64; -#elif defined(_MSC_VER) && !defined(__clang__) - // MSVC does not have __buitin_clzll. 
Compose two calls to _BitScanReverse - unsigned long result = 0; // NOLINT(runtime/int) - if ((n >> 32) && _BitScanReverse(&result, n >> 32)) { - return 31 - result; - } - if (_BitScanReverse(&result, n)) { - return 63 - result; - } - return 64; -#elif defined(__GNUC__) || defined(__clang__) - // Use __builtin_clzll, which uses the following instructions: - // x86: bsr - // ARM64: clz - // PPC: cntlzd - static_assert(sizeof(unsigned long long) == sizeof(n), // NOLINT(runtime/int) - "__builtin_clzll does not take 64-bit arg"); - - // Handle 0 as a special case because __builtin_clzll(0) is undefined. - if (n == 0) { - return 64; - } - return __builtin_clzll(n); -#else - return CountLeadingZeros64Slow(n); -#endif -} - -ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros32Slow(uint64_t n) { - int zeroes = 28; - if (n >> 16) { - zeroes -= 16; - n >>= 16; - } - if (n >> 8) { - zeroes -= 8; - n >>= 8; - } - if (n >> 4) { - zeroes -= 4; - n >>= 4; - } - return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[n] + zeroes; -} - -ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros32(uint32_t n) { -#if defined(_MSC_VER) && !defined(__clang__) - unsigned long result = 0; // NOLINT(runtime/int) - if (_BitScanReverse(&result, n)) { - return 31 - result; - } - return 32; -#elif defined(__GNUC__) || defined(__clang__) - // Use __builtin_clz, which uses the following instructions: - // x86: bsr - // ARM64: clz - // PPC: cntlzd - static_assert(sizeof(int) == sizeof(n), - "__builtin_clz does not take 32-bit arg"); - - // Handle 0 as a special case because __builtin_clz(0) is undefined. - if (n == 0) { - return 32; - } - return __builtin_clz(n); -#else - return CountLeadingZeros32Slow(n); -#endif -} - -ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero64Slow(uint64_t n) { - int c = 63; - n &= ~n + 1; - if (n & 0x00000000FFFFFFFF) c -= 32; - if (n & 0x0000FFFF0000FFFF) c -= 16; - if (n & 0x00FF00FF00FF00FF) c -= 8; - if (n & 0x0F0F0F0F0F0F0F0F) c -= 4; - if (n & 0x3333333333333333) c -= 2; - if (n & 0x5555555555555555) c -= 1; - return c; -} - -ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero64(uint64_t n) { -#if defined(_MSC_VER) && !defined(__clang__) && defined(_M_X64) - unsigned long result = 0; // NOLINT(runtime/int) - _BitScanForward64(&result, n); - return result; -#elif defined(_MSC_VER) && !defined(__clang__) - unsigned long result = 0; // NOLINT(runtime/int) - if (static_cast(n) == 0) { - _BitScanForward(&result, n >> 32); - return result + 32; - } - _BitScanForward(&result, n); - return result; -#elif defined(__GNUC__) || defined(__clang__) - static_assert(sizeof(unsigned long long) == sizeof(n), // NOLINT(runtime/int) - "__builtin_ctzll does not take 64-bit arg"); - return __builtin_ctzll(n); -#else - return CountTrailingZerosNonZero64Slow(n); -#endif -} - -ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero32Slow(uint32_t n) { - int c = 31; - n &= ~n + 1; - if (n & 0x0000FFFF) c -= 16; - if (n & 0x00FF00FF) c -= 8; - if (n & 0x0F0F0F0F) c -= 4; - if (n & 0x33333333) c -= 2; - if (n & 0x55555555) c -= 1; - return c; -} - -ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero32(uint32_t n) { -#if defined(_MSC_VER) && !defined(__clang__) - unsigned long result = 0; // NOLINT(runtime/int) - _BitScanForward(&result, n); - return result; -#elif defined(__GNUC__) || defined(__clang__) - static_assert(sizeof(int) == sizeof(n), - "__builtin_ctz does not take 32-bit arg"); - return __builtin_ctz(n); -#else - return CountTrailingZerosNonZero32Slow(n); -#endif -} - -#undef 
ABSL_BASE_INTERNAL_FORCEINLINE - -} // namespace base_internal -ABSL_NAMESPACE_END -} // namespace absl - -#endif // ABSL_BASE_INTERNAL_BITS_H_ diff --git a/TMessagesProj/jni/voip/webrtc/absl/base/internal/direct_mmap.h b/TMessagesProj/jni/voip/webrtc/absl/base/internal/direct_mmap.h index 5618867ba..274054cd5 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/base/internal/direct_mmap.h +++ b/TMessagesProj/jni/voip/webrtc/absl/base/internal/direct_mmap.h @@ -61,6 +61,10 @@ extern "C" void* __mmap2(void*, size_t, int, int, int, size_t); #endif #endif // __BIONIC__ +#if defined(__NR_mmap2) && !defined(SYS_mmap2) +#define SYS_mmap2 __NR_mmap2 +#endif + namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { @@ -70,9 +74,13 @@ namespace base_internal { inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd, off64_t offset) noexcept { #if defined(__i386__) || defined(__ARM_ARCH_3__) || defined(__ARM_EABI__) || \ + defined(__m68k__) || defined(__sh__) || \ + (defined(__hppa__) && !defined(__LP64__)) || \ (defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32) || \ (defined(__PPC__) && !defined(__PPC64__)) || \ - (defined(__s390__) && !defined(__s390x__)) + (defined(__riscv) && __riscv_xlen == 32) || \ + (defined(__s390__) && !defined(__s390x__)) || \ + (defined(__sparc__) && !defined(__arch64__)) // On these architectures, implement mmap with mmap2. static int pagesize = 0; if (pagesize == 0) { diff --git a/TMessagesProj/jni/voip/webrtc/absl/base/internal/dynamic_annotations.h b/TMessagesProj/jni/voip/webrtc/absl/base/internal/dynamic_annotations.h new file mode 100644 index 000000000..b23c5ec1c --- /dev/null +++ b/TMessagesProj/jni/voip/webrtc/absl/base/internal/dynamic_annotations.h @@ -0,0 +1,398 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file defines dynamic annotations for use with dynamic analysis tool +// such as valgrind, PIN, etc. +// +// Dynamic annotation is a source code annotation that affects the generated +// code (that is, the annotation is not a comment). Each such annotation is +// attached to a particular instruction and/or to a particular object (address) +// in the program. +// +// The annotations that should be used by users are macros in all upper-case +// (e.g., ANNOTATE_THREAD_NAME). +// +// Actual implementation of these macros may differ depending on the dynamic +// analysis tool being used. +// +// This file supports the following configurations: +// - Dynamic Annotations enabled (with static thread-safety warnings disabled). +// In this case, macros expand to functions implemented by Thread Sanitizer, +// when building with TSan. When not provided an external implementation, +// dynamic_annotations.cc provides no-op implementations. +// +// - Static Clang thread-safety warnings enabled. +// When building with a Clang compiler that supports thread-safety warnings, +// a subset of annotations can be statically-checked at compile-time. 
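// An illustrative sketch, not taken from the Abseil sources or this patch,
// giving background for the DirectMmap() change above: on the listed 32-bit
// targets the kernel exposes mmap2(), whose offset argument is given in
// page-sized units (4096-byte units on these targets) rather than bytes, so a
// byte offset must be page-aligned and divided by the page size before the
// syscall. A simplified, hypothetical helper for that conversion:
#include <cerrno>
#include <cstdint>
#include <unistd.h>

// Returns the offset in pages, or -1 (with errno set to EINVAL) if the byte
// offset cannot be expressed for mmap2().
int64_t ByteOffsetToMmap2Offset(int64_t byte_offset) {
  const long pagesize = sysconf(_SC_PAGESIZE);
  if (byte_offset < 0 || byte_offset % pagesize != 0) {
    errno = EINVAL;
    return -1;
  }
  return byte_offset / pagesize;
}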
We +// expand these macros to static-inline functions that can be analyzed for +// thread-safety, but afterwards elided when building the final binary. +// +// - All annotations are disabled. +// If neither Dynamic Annotations nor Clang thread-safety warnings are +// enabled, then all annotation-macros expand to empty. + +#ifndef ABSL_BASE_INTERNAL_DYNAMIC_ANNOTATIONS_H_ +#define ABSL_BASE_INTERNAL_DYNAMIC_ANNOTATIONS_H_ + +#include + +#include "absl/base/config.h" + +// ------------------------------------------------------------------------- +// Decide which features are enabled + +#ifndef DYNAMIC_ANNOTATIONS_ENABLED +#define DYNAMIC_ANNOTATIONS_ENABLED 0 +#endif + +#if defined(__clang__) && !defined(SWIG) +#define ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED 1 +#endif + +#if DYNAMIC_ANNOTATIONS_ENABLED != 0 + +#define ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED 1 +#define ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED 1 +#define ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED 1 +#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED 0 +#define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED 1 + +#else + +#define ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED 0 +#define ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED 0 +#define ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED 0 + +// Clang provides limited support for static thread-safety analysis through a +// feature called Annotalysis. We configure macro-definitions according to +// whether Annotalysis support is available. When running in opt-mode, GCC +// will issue a warning, if these attributes are compiled. Only include them +// when compiling using Clang. + +// ANNOTALYSIS_ENABLED == 1 when IGNORE_READ_ATTRIBUTE_ENABLED == 1 +#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED \ + defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED) +// Read/write annotations are enabled in Annotalysis mode; disabled otherwise. +#define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED \ + ABSL_INTERNAL_ANNOTALYSIS_ENABLED +#endif + +// Memory annotations are also made available to LLVM's Memory Sanitizer +#if defined(ABSL_HAVE_MEMORY_SANITIZER) && !defined(__native_client__) +#define ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED 1 +#endif + +#ifndef ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED +#define ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED 0 +#endif + +#ifdef __cplusplus +#define ABSL_INTERNAL_BEGIN_EXTERN_C extern "C" { +#define ABSL_INTERNAL_END_EXTERN_C } // extern "C" +#define ABSL_INTERNAL_GLOBAL_SCOPED(F) ::F +#define ABSL_INTERNAL_STATIC_INLINE inline +#else +#define ABSL_INTERNAL_BEGIN_EXTERN_C // empty +#define ABSL_INTERNAL_END_EXTERN_C // empty +#define ABSL_INTERNAL_GLOBAL_SCOPED(F) F +#define ABSL_INTERNAL_STATIC_INLINE static inline +#endif + +// ------------------------------------------------------------------------- +// Define race annotations. + +#if ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 1 + +// ------------------------------------------------------------- +// Annotations that suppress errors. It is usually better to express the +// program's synchronization using the other annotations, but these can be used +// when all else fails. + +// Report that we may have a benign race at `pointer`, with size +// "sizeof(*(pointer))". `pointer` must be a non-void* pointer. Insert at the +// point where `pointer` has been allocated, preferably close to the point +// where the race happens. See also ANNOTATE_BENIGN_RACE_STATIC. 
+#define ANNOTATE_BENIGN_RACE(pointer, description) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \ + (__FILE__, __LINE__, pointer, sizeof(*(pointer)), description) + +// Same as ANNOTATE_BENIGN_RACE(`address`, `description`), but applies to +// the memory range [`address`, `address`+`size`). +#define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \ + (__FILE__, __LINE__, address, size, description) + +// Enable (`enable`!=0) or disable (`enable`==0) race detection for all threads. +// This annotation could be useful if you want to skip expensive race analysis +// during some period of program execution, e.g. during initialization. +#define ANNOTATE_ENABLE_RACE_DETECTION(enable) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateEnableRaceDetection) \ + (__FILE__, __LINE__, enable) + +// ------------------------------------------------------------- +// Annotations useful for debugging. + +// Report the current thread `name` to a race detector. +#define ANNOTATE_THREAD_NAME(name) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateThreadName)(__FILE__, __LINE__, name) + +// ------------------------------------------------------------- +// Annotations useful when implementing locks. They are not normally needed by +// modules that merely use locks. The `lock` argument is a pointer to the lock +// object. + +// Report that a lock has been created at address `lock`. +#define ANNOTATE_RWLOCK_CREATE(lock) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreate)(__FILE__, __LINE__, lock) + +// Report that a linker initialized lock has been created at address `lock`. +#ifdef ABSL_HAVE_THREAD_SANITIZER +#define ANNOTATE_RWLOCK_CREATE_STATIC(lock) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreateStatic) \ + (__FILE__, __LINE__, lock) +#else +#define ANNOTATE_RWLOCK_CREATE_STATIC(lock) ANNOTATE_RWLOCK_CREATE(lock) +#endif + +// Report that the lock at address `lock` is about to be destroyed. +#define ANNOTATE_RWLOCK_DESTROY(lock) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockDestroy)(__FILE__, __LINE__, lock) + +// Report that the lock at address `lock` has been acquired. +// `is_w`=1 for writer lock, `is_w`=0 for reader lock. +#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockAcquired) \ + (__FILE__, __LINE__, lock, is_w) + +// Report that the lock at address `lock` is about to be released. +// `is_w`=1 for writer lock, `is_w`=0 for reader lock. +#define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockReleased) \ + (__FILE__, __LINE__, lock, is_w) + +// Apply ANNOTATE_BENIGN_RACE_SIZED to a static variable `static_var`. 
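// An illustrative sketch, not taken from the Abseil sources or this patch:
// the ANNOTATE_RWLOCK_* macros above are meant for code that implements its
// own lock and wants the race detector to model it. The toy spinlock below is
// hypothetical; application code would normally use the ABSL_-prefixed macros
// from the public header rather than this internal one.
#include <atomic>

class ToySpinLock {
 public:
  ToySpinLock() { ANNOTATE_RWLOCK_CREATE(this); }
  ~ToySpinLock() { ANNOTATE_RWLOCK_DESTROY(this); }

  void Lock() {
    while (locked_.exchange(true, std::memory_order_acquire)) {
      // Spin until the flag is released.
    }
    ANNOTATE_RWLOCK_ACQUIRED(this, /*is_w=*/1);  // Report as a writer lock.
  }

  void Unlock() {
    ANNOTATE_RWLOCK_RELEASED(this, /*is_w=*/1);  // Report before releasing.
    locked_.store(false, std::memory_order_release);
  }

 private:
  std::atomic<bool> locked_{false};
};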
+#define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) \ + namespace { \ + class static_var##_annotator { \ + public: \ + static_var##_annotator() { \ + ANNOTATE_BENIGN_RACE_SIZED(&static_var, sizeof(static_var), \ + #static_var ": " description); \ + } \ + }; \ + static static_var##_annotator the##static_var##_annotator; \ + } // namespace + +#else // ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 0 + +#define ANNOTATE_RWLOCK_CREATE(lock) // empty +#define ANNOTATE_RWLOCK_CREATE_STATIC(lock) // empty +#define ANNOTATE_RWLOCK_DESTROY(lock) // empty +#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) // empty +#define ANNOTATE_RWLOCK_RELEASED(lock, is_w) // empty +#define ANNOTATE_BENIGN_RACE(address, description) // empty +#define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) // empty +#define ANNOTATE_THREAD_NAME(name) // empty +#define ANNOTATE_ENABLE_RACE_DETECTION(enable) // empty +#define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) // empty + +#endif // ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED + +// ------------------------------------------------------------------------- +// Define memory annotations. + +#if ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED == 1 + +#include + +#define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \ + __msan_unpoison(address, size) + +#define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \ + __msan_allocated_memory(address, size) + +#else // ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED == 0 + +#if DYNAMIC_ANNOTATIONS_ENABLED == 1 +#define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \ + do { \ + (void)(address); \ + (void)(size); \ + } while (0) +#define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \ + do { \ + (void)(address); \ + (void)(size); \ + } while (0) +#else +#define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) // empty +#define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) // empty +#endif + +#endif // ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED + +// ------------------------------------------------------------------------- +// Define IGNORE_READS_BEGIN/_END attributes. + +#if defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED) + +#define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE \ + __attribute((exclusive_lock_function("*"))) +#define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE \ + __attribute((unlock_function("*"))) + +#else // !defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED) + +#define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE // empty +#define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE // empty + +#endif // defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED) + +// ------------------------------------------------------------------------- +// Define IGNORE_READS_BEGIN/_END annotations. + +#if ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED == 1 + +// Request the analysis tool to ignore all reads in the current thread until +// ANNOTATE_IGNORE_READS_END is called. Useful to ignore intentional racey +// reads, while still checking other reads and all writes. +// See also ANNOTATE_UNPROTECTED_READ. +#define ANNOTATE_IGNORE_READS_BEGIN() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsBegin)(__FILE__, __LINE__) + +// Stop ignoring reads. +#define ANNOTATE_IGNORE_READS_END() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsEnd)(__FILE__, __LINE__) + +#elif defined(ABSL_INTERNAL_ANNOTALYSIS_ENABLED) + +// When Annotalysis is enabled without Dynamic Annotations, the use of +// static-inline functions allows the annotations to be read at compile-time, +// while still letting the compiler elide the functions from the final build. 
+// +// TODO(delesley) -- The exclusive lock here ignores writes as well, but +// allows IGNORE_READS_AND_WRITES to work properly. + +#define ANNOTATE_IGNORE_READS_BEGIN() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AbslInternalAnnotateIgnoreReadsBegin)() + +#define ANNOTATE_IGNORE_READS_END() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AbslInternalAnnotateIgnoreReadsEnd)() + +#else + +#define ANNOTATE_IGNORE_READS_BEGIN() // empty +#define ANNOTATE_IGNORE_READS_END() // empty + +#endif + +// ------------------------------------------------------------------------- +// Define IGNORE_WRITES_BEGIN/_END annotations. + +#if ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED == 1 + +// Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore writes instead. +#define ANNOTATE_IGNORE_WRITES_BEGIN() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesBegin)(__FILE__, __LINE__) + +// Stop ignoring writes. +#define ANNOTATE_IGNORE_WRITES_END() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesEnd)(__FILE__, __LINE__) + +#else + +#define ANNOTATE_IGNORE_WRITES_BEGIN() // empty +#define ANNOTATE_IGNORE_WRITES_END() // empty + +#endif + +// ------------------------------------------------------------------------- +// Define the ANNOTATE_IGNORE_READS_AND_WRITES_* annotations using the more +// primitive annotations defined above. +// +// Instead of doing +// ANNOTATE_IGNORE_READS_BEGIN(); +// ... = x; +// ANNOTATE_IGNORE_READS_END(); +// one can use +// ... = ANNOTATE_UNPROTECTED_READ(x); + +#if defined(ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED) + +// Start ignoring all memory accesses (both reads and writes). +#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \ + do { \ + ANNOTATE_IGNORE_READS_BEGIN(); \ + ANNOTATE_IGNORE_WRITES_BEGIN(); \ + } while (0) + +// Stop ignoring both reads and writes. +#define ANNOTATE_IGNORE_READS_AND_WRITES_END() \ + do { \ + ANNOTATE_IGNORE_WRITES_END(); \ + ANNOTATE_IGNORE_READS_END(); \ + } while (0) + +#ifdef __cplusplus +// ANNOTATE_UNPROTECTED_READ is the preferred way to annotate racey reads. +#define ANNOTATE_UNPROTECTED_READ(x) \ + absl::base_internal::AnnotateUnprotectedRead(x) + +#endif + +#else + +#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() // empty +#define ANNOTATE_IGNORE_READS_AND_WRITES_END() // empty +#define ANNOTATE_UNPROTECTED_READ(x) (x) + +#endif + +// ------------------------------------------------------------------------- +// Address sanitizer annotations + +#ifdef ABSL_HAVE_ADDRESS_SANITIZER +// Describe the current state of a contiguous container such as e.g. +// std::vector or std::string. For more details see +// sanitizer/common_interface_defs.h, which is provided by the compiler. +#include + +#define ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) \ + __sanitizer_annotate_contiguous_container(beg, end, old_mid, new_mid) +#define ADDRESS_SANITIZER_REDZONE(name) \ + struct { \ + char x[8] __attribute__((aligned(8))); \ + } name + +#else + +#define ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) +#define ADDRESS_SANITIZER_REDZONE(name) static_assert(true, "") + +#endif // ABSL_HAVE_ADDRESS_SANITIZER + +// ------------------------------------------------------------------------- +// Undefine the macros intended only for this file. 
+
+#undef ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED
+#undef ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED
+#undef ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED
+#undef ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED
+#undef ABSL_INTERNAL_ANNOTALYSIS_ENABLED
+#undef ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED
+#undef ABSL_INTERNAL_BEGIN_EXTERN_C
+#undef ABSL_INTERNAL_END_EXTERN_C
+#undef ABSL_INTERNAL_STATIC_INLINE
+
+#endif  // ABSL_BASE_INTERNAL_DYNAMIC_ANNOTATIONS_H_
diff --git a/TMessagesProj/jni/voip/webrtc/absl/base/internal/endian.h b/TMessagesProj/jni/voip/webrtc/absl/base/internal/endian.h
index 9677530e8..dad0e9aeb 100644
--- a/TMessagesProj/jni/voip/webrtc/absl/base/internal/endian.h
+++ b/TMessagesProj/jni/voip/webrtc/absl/base/internal/endian.h
@@ -26,6 +26,7 @@
 #endif
 
 #include <cstdint>
+#include "absl/base/casts.h"
 #include "absl/base/config.h"
 #include "absl/base/internal/unaligned_access.h"
 #include "absl/base/port.h"
@@ -173,6 +174,36 @@ inline constexpr bool IsLittleEndian() { return false; }
 
 #endif /* ENDIAN */
 
+inline uint8_t FromHost(uint8_t x) { return x; }
+inline uint16_t FromHost(uint16_t x) { return FromHost16(x); }
+inline uint32_t FromHost(uint32_t x) { return FromHost32(x); }
+inline uint64_t FromHost(uint64_t x) { return FromHost64(x); }
+inline uint8_t ToHost(uint8_t x) { return x; }
+inline uint16_t ToHost(uint16_t x) { return ToHost16(x); }
+inline uint32_t ToHost(uint32_t x) { return ToHost32(x); }
+inline uint64_t ToHost(uint64_t x) { return ToHost64(x); }
+
+inline int8_t FromHost(int8_t x) { return x; }
+inline int16_t FromHost(int16_t x) {
+  return bit_cast<int16_t>(FromHost16(bit_cast<uint16_t>(x)));
+}
+inline int32_t FromHost(int32_t x) {
+  return bit_cast<int32_t>(FromHost32(bit_cast<uint32_t>(x)));
+}
+inline int64_t FromHost(int64_t x) {
+  return bit_cast<int64_t>(FromHost64(bit_cast<uint64_t>(x)));
+}
+inline int8_t ToHost(int8_t x) { return x; }
+inline int16_t ToHost(int16_t x) {
+  return bit_cast<int16_t>(ToHost16(bit_cast<uint16_t>(x)));
+}
+inline int32_t ToHost(int32_t x) {
+  return bit_cast<int32_t>(ToHost32(bit_cast<uint32_t>(x)));
+}
+inline int64_t ToHost(int64_t x) {
+  return bit_cast<int64_t>(ToHost64(bit_cast<uint64_t>(x)));
+}
+
 // Functions to do unaligned loads and stores in little-endian order.
 inline uint16_t Load16(const void *p) {
   return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
@@ -233,6 +264,36 @@ inline constexpr bool IsLittleEndian() { return false; }
 
 #endif /* ENDIAN */
 
+inline uint8_t FromHost(uint8_t x) { return x; }
+inline uint16_t FromHost(uint16_t x) { return FromHost16(x); }
+inline uint32_t FromHost(uint32_t x) { return FromHost32(x); }
+inline uint64_t FromHost(uint64_t x) { return FromHost64(x); }
+inline uint8_t ToHost(uint8_t x) { return x; }
+inline uint16_t ToHost(uint16_t x) { return ToHost16(x); }
+inline uint32_t ToHost(uint32_t x) { return ToHost32(x); }
+inline uint64_t ToHost(uint64_t x) { return ToHost64(x); }
+
+inline int8_t FromHost(int8_t x) { return x; }
+inline int16_t FromHost(int16_t x) {
+  return bit_cast<int16_t>(FromHost16(bit_cast<uint16_t>(x)));
+}
+inline int32_t FromHost(int32_t x) {
+  return bit_cast<int32_t>(FromHost32(bit_cast<uint32_t>(x)));
+}
+inline int64_t FromHost(int64_t x) {
+  return bit_cast<int64_t>(FromHost64(bit_cast<uint64_t>(x)));
+}
+inline int8_t ToHost(int8_t x) { return x; }
+inline int16_t ToHost(int16_t x) {
+  return bit_cast<int16_t>(ToHost16(bit_cast<uint16_t>(x)));
+}
+inline int32_t ToHost(int32_t x) {
+  return bit_cast<int32_t>(ToHost32(bit_cast<uint32_t>(x)));
+}
+inline int64_t ToHost(int64_t x) {
+  return bit_cast<int64_t>(ToHost64(bit_cast<uint64_t>(x)));
+}
+
 // Functions to do unaligned loads and stores in big-endian order.
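// For example, the FromHost()/ToHost() overloads above give one spelling for
// signed and unsigned widths, and the Load/Store helpers below handle
// unaligned buffers (a minimal sketch; `buf` and the values are illustrative):
//
//   char buf[2];
//   big_endian::Store16(buf, 0x1234);        // memory now holds 0x12 0x34
//   assert(big_endian::Load16(buf) == 0x1234);
//   int32_t wire = big_endian::FromHost(int32_t{-42});
//   assert(big_endian::ToHost(wire) == -42);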
inline uint16_t Load16(const void *p) { return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p)); diff --git a/TMessagesProj/jni/voip/webrtc/absl/base/internal/exception_safety_testing.h b/TMessagesProj/jni/voip/webrtc/absl/base/internal/exception_safety_testing.h index 6ba89d05d..d19863fd6 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/base/internal/exception_safety_testing.h +++ b/TMessagesProj/jni/voip/webrtc/absl/base/internal/exception_safety_testing.h @@ -30,7 +30,6 @@ #include #include -#include "gtest/gtest.h" #include "absl/base/internal/pretty_function.h" #include "absl/memory/memory.h" #include "absl/meta/type_traits.h" @@ -536,7 +535,22 @@ class ThrowingValue : private exceptions_internal::TrackedObject { } // Memory management operators - // Args.. allows us to overload regular and placement new in one shot + static void* operator new(size_t s) noexcept( + IsSpecified(TypeSpec::kNoThrowNew)) { + if (!IsSpecified(TypeSpec::kNoThrowNew)) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true); + } + return ::operator new(s); + } + + static void* operator new[](size_t s) noexcept( + IsSpecified(TypeSpec::kNoThrowNew)) { + if (!IsSpecified(TypeSpec::kNoThrowNew)) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true); + } + return ::operator new[](s); + } + template static void* operator new(size_t s, Args&&... args) noexcept( IsSpecified(TypeSpec::kNoThrowNew)) { @@ -557,12 +571,6 @@ class ThrowingValue : private exceptions_internal::TrackedObject { // Abseil doesn't support throwing overloaded operator delete. These are // provided so a throwing operator-new can clean up after itself. - // - // We provide both regular and templated operator delete because if only the - // templated version is provided as we did with operator new, the compiler has - // no way of knowing which overload of operator delete to call. See - // https://en.cppreference.com/w/cpp/memory/new/operator_delete and - // https://en.cppreference.com/w/cpp/language/delete for the gory details. void operator delete(void* p) noexcept { ::operator delete(p); } template @@ -726,9 +734,8 @@ class ThrowingAllocator : private exceptions_internal::TrackedObject { ThrowingAllocator select_on_container_copy_construction() noexcept( IsSpecified(AllocSpec::kNoThrowAllocate)) { - auto& out = *this; ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION); - return out; + return *this; } template diff --git a/TMessagesProj/jni/voip/webrtc/absl/base/internal/exponential_biased.cc b/TMessagesProj/jni/voip/webrtc/absl/base/internal/exponential_biased.cc index 1b30c061e..05aeea566 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/base/internal/exponential_biased.cc +++ b/TMessagesProj/jni/voip/webrtc/absl/base/internal/exponential_biased.cc @@ -64,7 +64,7 @@ int64_t ExponentialBiased::GetSkipCount(int64_t mean) { // Assume huge values are bias neutral, retain bias for next call. 
     return std::numeric_limits<int64_t>::max() / 2;
   }
-  double value = std::round(interval);
+  double value = std::rint(interval);
   bias_ = interval - value;
   return value;
 }
diff --git a/TMessagesProj/jni/voip/webrtc/absl/base/internal/exponential_biased.h b/TMessagesProj/jni/voip/webrtc/absl/base/internal/exponential_biased.h
index 94f79a337..a81f10e23 100644
--- a/TMessagesProj/jni/voip/webrtc/absl/base/internal/exponential_biased.h
+++ b/TMessagesProj/jni/voip/webrtc/absl/base/internal/exponential_biased.h
@@ -66,7 +66,7 @@ namespace base_internal {
 // Adjusting with rounding bias is relatively trivial:
 //
 //    double value = bias_ + exponential_distribution(mean)();
-//    double rounded_value = std::round(value);
+//    double rounded_value = std::rint(value);
 //    bias_ = value - rounded_value;
 //    return rounded_value;
 //
diff --git a/TMessagesProj/jni/voip/webrtc/absl/base/internal/invoke.h b/TMessagesProj/jni/voip/webrtc/absl/base/internal/invoke.h
index c4eceebd7..5c71f3282 100644
--- a/TMessagesProj/jni/voip/webrtc/absl/base/internal/invoke.h
+++ b/TMessagesProj/jni/voip/webrtc/absl/base/internal/invoke.h
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 //
-// absl::base_internal::Invoke(f, args...) is an implementation of
+// absl::base_internal::invoke(f, args...) is an implementation of
 // INVOKE(f, args...) from section [func.require] of the C++ standard.
 //
 // [func.require]
@@ -29,7 +29,7 @@
 //    is not one of the types described in the previous item;
 // 5. f(t1, t2, ..., tN) in all other cases.
 //
-// The implementation is SFINAE-friendly: substitution failure within Invoke()
+// The implementation is SFINAE-friendly: substitution failure within invoke()
 // isn't an error.
 
 #ifndef ABSL_BASE_INTERNAL_INVOKE_H_
@@ -170,13 +170,13 @@ struct Invoker {
 
 // The result type of Invoke<F, Args...>.
 template <typename F, typename... Args>
-using InvokeT = decltype(Invoker<F, Args...>::type::Invoke(
+using invoke_result_t = decltype(Invoker<F, Args...>::type::Invoke(
     std::declval<F>(), std::declval<Args>()...));
 
 // Invoke(f, args...) is an implementation of INVOKE(f, args...) from section
 // [func.require] of the C++ standard.
 template <typename F, typename... Args>
-InvokeT<F, Args...> Invoke(F&& f, Args&&... args) {
+invoke_result_t<F, Args...> invoke(F&& f, Args&&... args) {
   return Invoker<F, Args...>::type::Invoke(std::forward<F>(f),
                                            std::forward<Args>(args)...);
 }
diff --git a/TMessagesProj/jni/voip/webrtc/absl/base/internal/low_level_scheduling.h b/TMessagesProj/jni/voip/webrtc/absl/base/internal/low_level_scheduling.h
index 961cc981b..9baccc065 100644
--- a/TMessagesProj/jni/voip/webrtc/absl/base/internal/low_level_scheduling.h
+++ b/TMessagesProj/jni/voip/webrtc/absl/base/internal/low_level_scheduling.h
@@ -18,6 +18,7 @@
 #ifndef ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
 #define ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
 
+#include "absl/base/internal/raw_logging.h"
 #include "absl/base/internal/scheduling_mode.h"
 #include "absl/base/macros.h"
 
@@ -29,6 +30,13 @@ extern "C" void __google_enable_rescheduling(bool disable_result);
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
+class CondVar;
+class Mutex;
+
+namespace synchronization_internal {
+int MutexDelay(int32_t c, int mode);
+}  // namespace synchronization_internal
+
 namespace base_internal {
 
 class SchedulingHelper;  // To allow use of SchedulingGuard.
@@ -53,6 +61,8 @@ class SchedulingGuard {
  public:
   // Returns true iff the calling thread may be cooperatively rescheduled.
static bool ReschedulingIsAllowed(); + SchedulingGuard(const SchedulingGuard&) = delete; + SchedulingGuard& operator=(const SchedulingGuard&) = delete; private: // Disable cooperative rescheduling of the calling thread. It may still @@ -76,12 +86,23 @@ class SchedulingGuard { bool disabled; }; - // Access to SchedulingGuard is explicitly white-listed. + // A scoped helper to enable rescheduling temporarily. + // REQUIRES: destructor must run in same thread as constructor. + class ScopedEnable { + public: + ScopedEnable(); + ~ScopedEnable(); + + private: + int scheduling_disabled_depth_; + }; + + // Access to SchedulingGuard is explicitly permitted. + friend class absl::CondVar; + friend class absl::Mutex; friend class SchedulingHelper; friend class SpinLock; - - SchedulingGuard(const SchedulingGuard&) = delete; - SchedulingGuard& operator=(const SchedulingGuard&) = delete; + friend int absl::synchronization_internal::MutexDelay(int32_t c, int mode); }; //------------------------------------------------------------------------------ @@ -100,6 +121,12 @@ inline void SchedulingGuard::EnableRescheduling(bool /* disable_result */) { return; } +inline SchedulingGuard::ScopedEnable::ScopedEnable() + : scheduling_disabled_depth_(0) {} +inline SchedulingGuard::ScopedEnable::~ScopedEnable() { + ABSL_RAW_CHECK(scheduling_disabled_depth_ == 0, "disable unused warning"); +} + } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl diff --git a/TMessagesProj/jni/voip/webrtc/absl/base/internal/raw_logging.cc b/TMessagesProj/jni/voip/webrtc/absl/base/internal/raw_logging.cc index 40cea5506..074e026ad 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/base/internal/raw_logging.cc +++ b/TMessagesProj/jni/voip/webrtc/absl/base/internal/raw_logging.cc @@ -67,28 +67,32 @@ #undef ABSL_HAVE_RAW_IO #endif -// TODO(gfalcon): We want raw-logging to work on as many platforms as possible. -// Explicitly #error out when not ABSL_LOW_LEVEL_WRITE_SUPPORTED, except for a -// whitelisted set of platforms for which we expect not to be able to raw log. +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace raw_logging_internal { +namespace { -ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static absl::base_internal::AtomicHook< - absl::raw_logging_internal::LogPrefixHook> - log_prefix_hook; -ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static absl::base_internal::AtomicHook< - absl::raw_logging_internal::AbortHook> - abort_hook; +// TODO(gfalcon): We want raw-logging to work on as many platforms as possible. +// Explicitly `#error` out when not `ABSL_LOW_LEVEL_WRITE_SUPPORTED`, except for +// a selected set of platforms for which we expect not to be able to raw log. + +ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES + absl::base_internal::AtomicHook + log_prefix_hook; +ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES + absl::base_internal::AtomicHook + abort_hook; #ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED -static const char kTruncated[] = " ... (message truncated)\n"; +constexpr char kTruncated[] = " ... (message truncated)\n"; // sprintf the format to the buffer, adjusting *buf and *size to reflect the // consumed bytes, and return whether the message fit without truncation. If // truncation occurred, if possible leave room in the buffer for the message // kTruncated[]. 
-inline static bool VADoRawLog(char** buf, int* size, const char* format, - va_list ap) ABSL_PRINTF_ATTRIBUTE(3, 0); -inline static bool VADoRawLog(char** buf, int* size, - const char* format, va_list ap) { +bool VADoRawLog(char** buf, int* size, const char* format, va_list ap) + ABSL_PRINTF_ATTRIBUTE(3, 0); +bool VADoRawLog(char** buf, int* size, const char* format, va_list ap) { int n = vsnprintf(*buf, *size, format, ap); bool result = true; if (n < 0 || n > *size) { @@ -96,7 +100,7 @@ inline static bool VADoRawLog(char** buf, int* size, if (static_cast(*size) > sizeof(kTruncated)) { n = *size - sizeof(kTruncated); // room for truncation message } else { - n = 0; // no room for truncation message + n = 0; // no room for truncation message } } *size -= n; @@ -105,9 +109,7 @@ inline static bool VADoRawLog(char** buf, int* size, } #endif // ABSL_LOW_LEVEL_WRITE_SUPPORTED -static constexpr int kLogBufSize = 3000; - -namespace { +constexpr int kLogBufSize = 3000; // CAVEAT: vsnprintf called from *DoRawLog below has some (exotic) code paths // that invoke malloc() and getenv() that might acquire some locks. @@ -166,7 +168,7 @@ void RawLogVA(absl::LogSeverity severity, const char* file, int line, } else { DoRawLog(&buf, &size, "%s", kTruncated); } - absl::raw_logging_internal::SafeWriteToStderr(buffer, strlen(buffer)); + SafeWriteToStderr(buffer, strlen(buffer)); } #else static_cast(format); @@ -181,11 +183,18 @@ void RawLogVA(absl::LogSeverity severity, const char* file, int line, } } +// Non-formatting version of RawLog(). +// +// TODO(gfalcon): When string_view no longer depends on base, change this +// interface to take its message as a string_view instead. +void DefaultInternalLog(absl::LogSeverity severity, const char* file, int line, + const std::string& message) { + RawLog(severity, file, line, "%.*s", static_cast(message.size()), + message.data()); +} + } // namespace -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace raw_logging_internal { void SafeWriteToStderr(const char *s, size_t len) { #if defined(ABSL_HAVE_SYSCALL_WRITE) syscall(SYS_write, STDERR_FILENO, s, len); @@ -200,8 +209,6 @@ void SafeWriteToStderr(const char *s, size_t len) { #endif } -void RawLog(absl::LogSeverity severity, const char* file, int line, - const char* format, ...) ABSL_PRINTF_ATTRIBUTE(4, 5); void RawLog(absl::LogSeverity severity, const char* file, int line, const char* format, ...) { va_list ap; @@ -210,15 +217,6 @@ void RawLog(absl::LogSeverity severity, const char* file, int line, va_end(ap); } -// Non-formatting version of RawLog(). -// -// TODO(gfalcon): When string_view no longer depends on base, change this -// interface to take its message as a string_view instead. 
-static void DefaultInternalLog(absl::LogSeverity severity, const char* file, - int line, const std::string& message) { - RawLog(severity, file, line, "%s", message.c_str()); -} - bool RawLoggingFullySupported() { #ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED return true; @@ -227,10 +225,14 @@ bool RawLoggingFullySupported() { #endif // !ABSL_LOW_LEVEL_WRITE_SUPPORTED } -ABSL_DLL ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES +ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_DLL absl::base_internal::AtomicHook internal_log_function(DefaultInternalLog); +void RegisterLogPrefixHook(LogPrefixHook func) { log_prefix_hook.Store(func); } + +void RegisterAbortHook(AbortHook func) { abort_hook.Store(func); } + void RegisterInternalLogFunction(InternalLogFunction func) { internal_log_function.Store(func); } diff --git a/TMessagesProj/jni/voip/webrtc/absl/base/internal/raw_logging.h b/TMessagesProj/jni/voip/webrtc/absl/base/internal/raw_logging.h index 418d6c856..2bf7aabac 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/base/internal/raw_logging.h +++ b/TMessagesProj/jni/voip/webrtc/absl/base/internal/raw_logging.h @@ -72,10 +72,14 @@ // // The API is a subset of the above: each macro only takes two arguments. Use // StrCat if you need to build a richer message. -#define ABSL_INTERNAL_LOG(severity, message) \ - do { \ - ::absl::raw_logging_internal::internal_log_function( \ - ABSL_RAW_LOGGING_INTERNAL_##severity, __FILE__, __LINE__, message); \ +#define ABSL_INTERNAL_LOG(severity, message) \ + do { \ + constexpr const char* absl_raw_logging_internal_filename = __FILE__; \ + ::absl::raw_logging_internal::internal_log_function( \ + ABSL_RAW_LOGGING_INTERNAL_##severity, \ + absl_raw_logging_internal_filename, __LINE__, message); \ + if (ABSL_RAW_LOGGING_INTERNAL_##severity == ::absl::LogSeverity::kFatal) \ + ABSL_INTERNAL_UNREACHABLE; \ } while (0) #define ABSL_INTERNAL_CHECK(condition, message) \ @@ -170,10 +174,18 @@ using InternalLogFunction = void (*)(absl::LogSeverity severity, const char* file, int line, const std::string& message); -ABSL_DLL ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES extern base_internal::AtomicHook< +ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_DLL extern base_internal::AtomicHook< InternalLogFunction> internal_log_function; +// Registers hooks of the above types. Only a single hook of each type may be +// registered. It is an error to call these functions multiple times with +// different input arguments. +// +// These functions are safe to call at any point during initialization; they do +// not block or malloc, and are async-signal safe. +void RegisterLogPrefixHook(LogPrefixHook func); +void RegisterAbortHook(AbortHook func); void RegisterInternalLogFunction(InternalLogFunction func); } // namespace raw_logging_internal diff --git a/TMessagesProj/jni/voip/webrtc/absl/base/internal/spinlock.cc b/TMessagesProj/jni/voip/webrtc/absl/base/internal/spinlock.cc index fd0c733e2..35c0696a3 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/base/internal/spinlock.cc +++ b/TMessagesProj/jni/voip/webrtc/absl/base/internal/spinlock.cc @@ -66,35 +66,19 @@ void RegisterSpinLockProfiler(void (*fn)(const void *contendedlock, submit_profile_data.Store(fn); } +// Static member variable definitions. +constexpr uint32_t SpinLock::kSpinLockHeld; +constexpr uint32_t SpinLock::kSpinLockCooperative; +constexpr uint32_t SpinLock::kSpinLockDisabledScheduling; +constexpr uint32_t SpinLock::kSpinLockSleeper; +constexpr uint32_t SpinLock::kWaitTimeMask; + // Uncommon constructors. 
SpinLock::SpinLock(base_internal::SchedulingMode mode) : lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) { ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static); } -SpinLock::SpinLock(base_internal::LinkerInitialized, - base_internal::SchedulingMode mode) { - ABSL_TSAN_MUTEX_CREATE(this, 0); - if (IsCooperative(mode)) { - InitLinkerInitializedAndCooperative(); - } - // Otherwise, lockword_ is already initialized. -} - -// Static (linker initialized) spinlocks always start life as functional -// non-cooperative locks. When their static constructor does run, it will call -// this initializer to augment the lockword with the cooperative bit. By -// actually taking the lock when we do this we avoid the need for an atomic -// operation in the regular unlock path. -// -// SlowLock() must be careful to re-test for this bit so that any outstanding -// waiters may be upgraded to cooperative status. -void SpinLock::InitLinkerInitializedAndCooperative() { - Lock(); - lockword_.fetch_or(kSpinLockCooperative, std::memory_order_relaxed); - Unlock(); -} - // Monitor the lock to see if its value changes within some time period // (adaptive_spin_count loop iterations). The last value read from the lock // is returned from the method. @@ -121,6 +105,14 @@ void SpinLock::SlowLock() { if ((lock_value & kSpinLockHeld) == 0) { return; } + + base_internal::SchedulingMode scheduling_mode; + if ((lock_value & kSpinLockCooperative) != 0) { + scheduling_mode = base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL; + } else { + scheduling_mode = base_internal::SCHEDULE_KERNEL_ONLY; + } + // The lock was not obtained initially, so this thread needs to wait for // it. Record the current timestamp in the local variable wait_start_time // so the total wait time can be stored in the lockword once this thread @@ -133,8 +125,9 @@ void SpinLock::SlowLock() { // it as having a sleeper. if ((lock_value & kWaitTimeMask) == 0) { // Here, just "mark" that the thread is going to sleep. Don't store the - // lock wait time in the lock as that will cause the current lock - // owner to think it experienced contention. + // lock wait time in the lock -- the lock word stores the amount of time + // that the current holder waited before acquiring the lock, not the wait + // time of any thread currently waiting to acquire it. if (lockword_.compare_exchange_strong( lock_value, lock_value | kSpinLockSleeper, std::memory_order_relaxed, std::memory_order_relaxed)) { @@ -148,15 +141,17 @@ void SpinLock::SlowLock() { // this thread obtains the lock. lock_value = TryLockInternal(lock_value, wait_cycles); continue; // Skip the delay at the end of the loop. + } else if ((lock_value & kWaitTimeMask) == 0) { + // The lock is still held, without a waiter being marked, but something + // else about the lock word changed, causing our CAS to fail. For + // example, a new lock holder may have acquired the lock with + // kSpinLockDisabledScheduling set, whereas the previous holder had not + // set that flag. In this case, attempt again to mark ourselves as a + // waiter. + continue; } } - base_internal::SchedulingMode scheduling_mode; - if ((lock_value & kSpinLockCooperative) != 0) { - scheduling_mode = base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL; - } else { - scheduling_mode = base_internal::SCHEDULE_KERNEL_ONLY; - } // SpinLockDelay() calls into fiber scheduler, we need to see // synchronization there to avoid false positives. 
ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0); diff --git a/TMessagesProj/jni/voip/webrtc/absl/base/internal/spinlock.h b/TMessagesProj/jni/voip/webrtc/absl/base/internal/spinlock.h index 89e93aad0..6d8d8dddd 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/base/internal/spinlock.h +++ b/TMessagesProj/jni/voip/webrtc/absl/base/internal/spinlock.h @@ -15,17 +15,16 @@ // // Most users requiring mutual exclusion should use Mutex. -// SpinLock is provided for use in three situations: -// - for use in code that Mutex itself depends on -// - to get a faster fast-path release under low contention (without an -// atomic read-modify-write) In return, SpinLock has worse behaviour under -// contention, which is why Mutex is preferred in most situations. +// SpinLock is provided for use in two situations: +// - for use by Abseil internal code that Mutex itself depends on // - for async signal safety (see below) // SpinLock is async signal safe. If a spinlock is used within a signal // handler, all code that acquires the lock must ensure that the signal cannot // arrive while they are holding the lock. Typically, this is done by blocking // the signal. +// +// Threads waiting on a SpinLock may be woken in an arbitrary order. #ifndef ABSL_BASE_INTERNAL_SPINLOCK_H_ #define ABSL_BASE_INTERNAL_SPINLOCK_H_ @@ -36,6 +35,7 @@ #include #include "absl/base/attributes.h" +#include "absl/base/const_init.h" #include "absl/base/dynamic_annotations.h" #include "absl/base/internal/low_level_scheduling.h" #include "absl/base/internal/raw_logging.h" @@ -55,29 +55,22 @@ class ABSL_LOCKABLE SpinLock { ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static); } - // Special constructor for use with static SpinLock objects. E.g., - // - // static SpinLock lock(base_internal::kLinkerInitialized); - // - // When initialized using this constructor, we depend on the fact - // that the linker has already initialized the memory appropriately. The lock - // is initialized in non-cooperative mode. - // - // A SpinLock constructed like this can be freely used from global - // initializers without worrying about the order in which global - // initializers run. - explicit SpinLock(base_internal::LinkerInitialized) { - // Does nothing; lockword_ is already initialized - ABSL_TSAN_MUTEX_CREATE(this, 0); - } - // Constructors that allow non-cooperative spinlocks to be created for use // inside thread schedulers. Normal clients should not use these. explicit SpinLock(base_internal::SchedulingMode mode); - SpinLock(base_internal::LinkerInitialized, - base_internal::SchedulingMode mode); + // Constructor for global SpinLock instances. See absl/base/const_init.h. + constexpr SpinLock(absl::ConstInitType, base_internal::SchedulingMode mode) + : lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {} + + // For global SpinLock instances prefer trivial destructor when possible. + // Default but non-trivial destructor in some build configurations causes an + // extra static initializer. +#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE ~SpinLock() { ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static); } +#else + ~SpinLock() = default; +#endif // Acquire this SpinLock. inline void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() { @@ -127,6 +120,14 @@ class ABSL_LOCKABLE SpinLock { return (lockword_.load(std::memory_order_relaxed) & kSpinLockHeld) != 0; } + // Return immediately if this thread holds the SpinLock exclusively. + // Otherwise, report an error by crashing with a diagnostic. 
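// For example, a private helper whose contract is "caller already holds the
// lock" can check that contract at runtime (a sketch; `Registry`, `lock_` and
// `FlushLocked` are illustrative names):
//
//   void Registry::FlushLocked() {
//     lock_.AssertHeld();  // crashes with a diagnostic if the contract is broken
//     ...
//   }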
+ inline void AssertHeld() const ABSL_ASSERT_EXCLUSIVE_LOCK() { + if (!IsHeld()) { + ABSL_RAW_LOG(FATAL, "thread should hold the lock on SpinLock"); + } + } + protected: // These should not be exported except for testing. @@ -146,8 +147,20 @@ class ABSL_LOCKABLE SpinLock { // // bit[0] encodes whether a lock is being held. // bit[1] encodes whether a lock uses cooperative scheduling. - // bit[2] encodes whether a lock disables scheduling. + // bit[2] encodes whether the current lock holder disabled scheduling when + // acquiring the lock. Only set when kSpinLockHeld is also set. // bit[3:31] encodes time a lock spent on waiting as a 29-bit unsigned int. + // This is set by the lock holder to indicate how long it waited on + // the lock before eventually acquiring it. The number of cycles is + // encoded as a 29-bit unsigned int, or in the case that the current + // holder did not wait but another waiter is queued, the LSB + // (kSpinLockSleeper) is set. The implementation does not explicitly + // track the number of queued waiters beyond this. It must always be + // assumed that waiters may exist if the current holder was required to + // queue. + // + // Invariant: if the lock is not held, the value is either 0 or + // kSpinLockCooperative. static constexpr uint32_t kSpinLockHeld = 1; static constexpr uint32_t kSpinLockCooperative = 2; static constexpr uint32_t kSpinLockDisabledScheduling = 4; @@ -163,7 +176,6 @@ class ABSL_LOCKABLE SpinLock { } uint32_t TryLockInternal(uint32_t lock_value, uint32_t wait_cycles); - void InitLinkerInitializedAndCooperative(); void SlowLock() ABSL_ATTRIBUTE_COLD; void SlowUnlock(uint32_t lock_value) ABSL_ATTRIBUTE_COLD; uint32_t SpinLoop(); diff --git a/TMessagesProj/jni/voip/webrtc/absl/base/internal/spinlock_akaros.inc b/TMessagesProj/jni/voip/webrtc/absl/base/internal/spinlock_akaros.inc index bc468940f..7b0cada4f 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/base/internal/spinlock_akaros.inc +++ b/TMessagesProj/jni/voip/webrtc/absl/base/internal/spinlock_akaros.inc @@ -20,7 +20,7 @@ extern "C" { -ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay( +ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)( std::atomic* /* lock_word */, uint32_t /* value */, int /* loop */, absl::base_internal::SchedulingMode /* mode */) { // In Akaros, one must take care not to call anything that could cause a @@ -29,7 +29,7 @@ ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay( // arbitrary code. 
} -ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockWake( +ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)( std::atomic* /* lock_word */, bool /* all */) {} } // extern "C" diff --git a/TMessagesProj/jni/voip/webrtc/absl/base/internal/spinlock_linux.inc b/TMessagesProj/jni/voip/webrtc/absl/base/internal/spinlock_linux.inc index 323edd62f..202f7cdfc 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/base/internal/spinlock_linux.inc +++ b/TMessagesProj/jni/voip/webrtc/absl/base/internal/spinlock_linux.inc @@ -46,9 +46,17 @@ static_assert(sizeof(std::atomic) == sizeof(int), #endif #endif +#if defined(__NR_futex_time64) && !defined(SYS_futex_time64) +#define SYS_futex_time64 __NR_futex_time64 +#endif + +#if defined(SYS_futex_time64) && !defined(SYS_futex) +#define SYS_futex SYS_futex_time64 +#endif + extern "C" { -ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay( +ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)( std::atomic *w, uint32_t value, int loop, absl::base_internal::SchedulingMode) { absl::base_internal::ErrnoSaver errno_saver; @@ -58,8 +66,8 @@ ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay( syscall(SYS_futex, w, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, value, &tm); } -ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockWake(std::atomic *w, - bool all) { +ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)( + std::atomic *w, bool all) { syscall(SYS_futex, w, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, all ? INT_MAX : 1, 0); } diff --git a/TMessagesProj/jni/voip/webrtc/absl/base/internal/spinlock_posix.inc b/TMessagesProj/jni/voip/webrtc/absl/base/internal/spinlock_posix.inc index fcd21b151..4f6f887d9 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/base/internal/spinlock_posix.inc +++ b/TMessagesProj/jni/voip/webrtc/absl/base/internal/spinlock_posix.inc @@ -25,7 +25,7 @@ extern "C" { -ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay( +ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)( std::atomic* /* lock_word */, uint32_t /* value */, int loop, absl::base_internal::SchedulingMode /* mode */) { absl::base_internal::ErrnoSaver errno_saver; @@ -40,7 +40,7 @@ ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay( } } -ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockWake( +ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)( std::atomic* /* lock_word */, bool /* all */) {} } // extern "C" diff --git a/TMessagesProj/jni/voip/webrtc/absl/base/internal/spinlock_wait.h b/TMessagesProj/jni/voip/webrtc/absl/base/internal/spinlock_wait.h index 169bc749f..9a1adcda5 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/base/internal/spinlock_wait.h +++ b/TMessagesProj/jni/voip/webrtc/absl/base/internal/spinlock_wait.h @@ -39,22 +39,22 @@ struct SpinLockWaitTransition { // satisfying 0<=i *w, int n, const SpinLockWaitTransition trans[], SchedulingMode scheduling_mode); -// If possible, wake some thread that has called SpinLockDelay(w, ...). If -// "all" is true, wake all such threads. This call is a hint, and on some -// systems it may be a no-op; threads calling SpinLockDelay() will always wake -// eventually even if SpinLockWake() is never called. +// If possible, wake some thread that has called SpinLockDelay(w, ...). If `all` +// is true, wake all such threads. On some systems, this may be a no-op; on +// those systems, threads calling SpinLockDelay() will always wake eventually +// even if SpinLockWake() is never called. 
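// The usual pairing is a spin loop that falls back to SpinLockDelay(), with
// the releasing side calling SpinLockWake() (a sketch; `word`, `TryAcquire()`
// and the unlocked value 0 are illustrative, not part of this header):
//
//   // Waiter:
//   for (int loop = 0; !TryAcquire(&word); ++loop) {
//     SpinLockDelay(&word, word.load(std::memory_order_relaxed), loop,
//                   SCHEDULE_KERNEL_ONLY);
//   }
//   // Releaser:
//   word.store(0, std::memory_order_release);
//   SpinLockWake(&word, false);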
void SpinLockWake(std::atomic *w, bool all); // Wait for an appropriate spin delay on iteration "loop" of a // spin loop on location *w, whose previously observed value was "value". // SpinLockDelay() may do nothing, may yield the CPU, may sleep a clock tick, -// or may wait for a delay that can be truncated by a call to SpinLockWake(w). -// In all cases, it must return in bounded time even if SpinLockWake() is not -// called. +// or may wait for a call to SpinLockWake(w). void SpinLockDelay(std::atomic *w, uint32_t value, int loop, base_internal::SchedulingMode scheduling_mode); @@ -73,21 +73,23 @@ ABSL_NAMESPACE_END // By changing our extension points to be extern "C", we dodge this // check. extern "C" { -void AbslInternalSpinLockWake(std::atomic *w, bool all); -void AbslInternalSpinLockDelay( +void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(std::atomic *w, + bool all); +void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)( std::atomic *w, uint32_t value, int loop, absl::base_internal::SchedulingMode scheduling_mode); } inline void absl::base_internal::SpinLockWake(std::atomic *w, bool all) { - AbslInternalSpinLockWake(w, all); + ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(w, all); } inline void absl::base_internal::SpinLockDelay( std::atomic *w, uint32_t value, int loop, absl::base_internal::SchedulingMode scheduling_mode) { - AbslInternalSpinLockDelay(w, value, loop, scheduling_mode); + ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay) + (w, value, loop, scheduling_mode); } #endif // ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_ diff --git a/TMessagesProj/jni/voip/webrtc/absl/base/internal/spinlock_win32.inc b/TMessagesProj/jni/voip/webrtc/absl/base/internal/spinlock_win32.inc index 78654b5b5..9d224813a 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/base/internal/spinlock_win32.inc +++ b/TMessagesProj/jni/voip/webrtc/absl/base/internal/spinlock_win32.inc @@ -20,9 +20,9 @@ extern "C" { -void AbslInternalSpinLockDelay(std::atomic* /* lock_word */, - uint32_t /* value */, int loop, - absl::base_internal::SchedulingMode /* mode */) { +void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)( + std::atomic* /* lock_word */, uint32_t /* value */, int loop, + absl::base_internal::SchedulingMode /* mode */) { if (loop == 0) { } else if (loop == 1) { Sleep(0); @@ -31,7 +31,7 @@ void AbslInternalSpinLockDelay(std::atomic* /* lock_word */, } } -void AbslInternalSpinLockWake(std::atomic* /* lock_word */, - bool /* all */) {} +void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)( + std::atomic* /* lock_word */, bool /* all */) {} } // extern "C" diff --git a/TMessagesProj/jni/voip/webrtc/absl/base/internal/strerror.cc b/TMessagesProj/jni/voip/webrtc/absl/base/internal/strerror.cc index af181513c..0d6226fd0 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/base/internal/strerror.cc +++ b/TMessagesProj/jni/voip/webrtc/absl/base/internal/strerror.cc @@ -14,6 +14,7 @@ #include "absl/base/internal/strerror.h" +#include #include #include #include @@ -21,13 +22,13 @@ #include #include -#include "absl/base/attributes.h" #include "absl/base/internal/errno_saver.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { namespace { + const char* StrErrorAdaptor(int errnum, char* buf, size_t buflen) { #if defined(_WIN32) int rc = strerror_s(buf, buflen, errnum); @@ -35,15 +36,6 @@ const char* StrErrorAdaptor(int errnum, char* buf, size_t buflen) { if (rc == 0 && strncmp(buf, "Unknown error", buflen) == 0) *buf = '\0'; return buf; #else -#if defined(__GLIBC__) || defined(__APPLE__) - // Use the BSD 
sys_errlist API provided by GNU glibc and others to
-  // avoid any need to copy the message into the local buffer first.
-  if (0 <= errnum && errnum < sys_nerr) {
-    if (const char* p = sys_errlist[errnum]) {
-      return p;
-    }
-  }
-#endif
   // The type of `ret` is platform-specific; both of these branches must compile
   // either way but only one will execute on any given platform:
   auto ret = strerror_r(errnum, buf, buflen);
@@ -57,10 +49,8 @@ const char* StrErrorAdaptor(int errnum, char* buf, size_t buflen) {
   }
 #endif
 }
-}  // namespace
 
-std::string StrError(int errnum) {
-  absl::base_internal::ErrnoSaver errno_saver;
+std::string StrErrorInternal(int errnum) {
   char buf[100];
   const char* str = StrErrorAdaptor(errnum, buf, sizeof buf);
   if (*str == '\0') {
@@ -70,6 +60,29 @@ const char* StrErrorAdaptor(int errnum, char* buf, size_t buflen) {
   return str;
 }
 
+// kSysNerr is the number of errors from a recent glibc. `StrError()` falls back
+// to `StrErrorAdaptor()` if the value is larger than this.
+constexpr int kSysNerr = 135;
+
+std::array<std::string, kSysNerr>* NewStrErrorTable() {
+  auto* table = new std::array<std::string, kSysNerr>;
+  for (int i = 0; i < static_cast<int>(table->size()); ++i) {
+    (*table)[i] = StrErrorInternal(i);
+  }
+  return table;
+}
+
+}  // namespace
+
+std::string StrError(int errnum) {
+  absl::base_internal::ErrnoSaver errno_saver;
+  static const auto* table = NewStrErrorTable();
+  if (errnum >= 0 && errnum < static_cast<int>(table->size())) {
+    return (*table)[errnum];
+  }
+  return StrErrorInternal(errnum);
+}
+
 }  // namespace base_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/TMessagesProj/jni/voip/webrtc/absl/base/internal/strerror_benchmark.cc b/TMessagesProj/jni/voip/webrtc/absl/base/internal/strerror_benchmark.cc
index d8ca86b95..c9ab14a89 100644
--- a/TMessagesProj/jni/voip/webrtc/absl/base/internal/strerror_benchmark.cc
+++ b/TMessagesProj/jni/voip/webrtc/absl/base/internal/strerror_benchmark.cc
@@ -20,15 +20,6 @@
 #include "benchmark/benchmark.h"
 
 namespace {
-#if defined(__GLIBC__) || defined(__APPLE__)
-void BM_SysErrList(benchmark::State& state) {
-  for (auto _ : state) {
-    benchmark::DoNotOptimize(std::string(sys_errlist[ERANGE]));
-  }
-}
-BENCHMARK(BM_SysErrList);
-#endif
-
 void BM_AbslStrError(benchmark::State& state) {
   for (auto _ : state) {
     benchmark::DoNotOptimize(absl::base_internal::StrError(ERANGE));
diff --git a/TMessagesProj/jni/voip/webrtc/absl/base/internal/sysinfo.cc b/TMessagesProj/jni/voip/webrtc/absl/base/internal/sysinfo.cc
index 0bed0d8cc..a7cfb461f 100644
--- a/TMessagesProj/jni/voip/webrtc/absl/base/internal/sysinfo.cc
+++ b/TMessagesProj/jni/voip/webrtc/absl/base/internal/sysinfo.cc
@@ -39,6 +39,7 @@
 #endif
 
 #include
+
 #include
 #include
 #include
@@ -50,17 +51,88 @@
 #include
 
 #include "absl/base/call_once.h"
+#include "absl/base/config.h"
 #include "absl/base/internal/raw_logging.h"
 #include "absl/base/internal/spinlock.h"
 #include "absl/base/internal/unscaledcycleclock.h"
+#include "absl/base/thread_annotations.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 namespace base_internal {
 
+namespace {
+
+#if defined(_WIN32)
+
+// Returns number of bits set in `bitMask`
+DWORD Win32CountSetBits(ULONG_PTR bitMask) {
+  for (DWORD bitSetCount = 0; ; ++bitSetCount) {
+    if (bitMask == 0) return bitSetCount;
+    bitMask &= bitMask - 1;
+  }
+}
+
+// Returns the number of logical CPUs using GetLogicalProcessorInformation(), or
+// 0 if the number of processors is not available or can not be computed.
+// https://docs.microsoft.com/en-us/windows/win32/api/sysinfoapi/nf-sysinfoapi-getlogicalprocessorinformation +int Win32NumCPUs() { +#pragma comment(lib, "kernel32.lib") + using Info = SYSTEM_LOGICAL_PROCESSOR_INFORMATION; + + DWORD info_size = sizeof(Info); + Info* info(static_cast(malloc(info_size))); + if (info == nullptr) return 0; + + bool success = GetLogicalProcessorInformation(info, &info_size); + if (!success && GetLastError() == ERROR_INSUFFICIENT_BUFFER) { + free(info); + info = static_cast(malloc(info_size)); + if (info == nullptr) return 0; + success = GetLogicalProcessorInformation(info, &info_size); + } + + DWORD logicalProcessorCount = 0; + if (success) { + Info* ptr = info; + DWORD byteOffset = 0; + while (byteOffset + sizeof(Info) <= info_size) { + switch (ptr->Relationship) { + case RelationProcessorCore: + logicalProcessorCount += Win32CountSetBits(ptr->ProcessorMask); + break; + + case RelationNumaNode: + case RelationCache: + case RelationProcessorPackage: + // Ignore other entries + break; + + default: + // Ignore unknown entries + break; + } + byteOffset += sizeof(Info); + ptr++; + } + } + free(info); + return logicalProcessorCount; +} + +#endif + +} // namespace + + static int GetNumCPUs() { #if defined(__myriad2__) return 1; +#elif defined(_WIN32) + const unsigned hardware_concurrency = Win32NumCPUs(); + return hardware_concurrency ? hardware_concurrency : 1; +#elif defined(_AIX) + return sysconf(_SC_NPROCESSORS_ONLN); #else // Other possibilities: // - Read /sys/devices/system/cpu/online and use cpumask_parse() @@ -343,15 +415,16 @@ pid_t GetTID() { #else // Fallback implementation of GetTID using pthread_getspecific. -static once_flag tid_once; -static pthread_key_t tid_key; -static absl::base_internal::SpinLock tid_lock( - absl::base_internal::kLinkerInitialized); +ABSL_CONST_INIT static once_flag tid_once; +ABSL_CONST_INIT static pthread_key_t tid_key; +ABSL_CONST_INIT static absl::base_internal::SpinLock tid_lock( + absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY); // We set a bit per thread in this array to indicate that an ID is in // use. ID 0 is unused because it is the default value returned by // pthread_getspecific(). -static std::vector* tid_array ABSL_GUARDED_BY(tid_lock) = nullptr; +ABSL_CONST_INIT static std::vector *tid_array + ABSL_GUARDED_BY(tid_lock) = nullptr; static constexpr int kBitsPerWord = 32; // tid_array is uint32_t. // Returns the TID to tid_array. @@ -418,6 +491,18 @@ pid_t GetTID() { #endif +// GetCachedTID() caches the thread ID in thread-local storage (which is a +// userspace construct) to avoid unnecessary system calls. Without this caching, +// it can take roughly 98ns, while it takes roughly 1ns with this caching. 
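// A caller on a hot path can therefore prefer the cached variant (a sketch;
// `TraceEvent` and the log message are illustrative):
//
//   void TraceEvent(const char* name) {
//     // No system call after the first call on this thread.
//     pid_t tid = absl::base_internal::GetCachedTID();
//     ABSL_RAW_LOG(INFO, "%s on tid %d", name, static_cast<int>(tid));
//   }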
+pid_t GetCachedTID() { +#ifdef ABSL_HAVE_THREAD_LOCAL + static thread_local pid_t thread_id = GetTID(); + return thread_id; +#else + return GetTID(); +#endif // ABSL_HAVE_THREAD_LOCAL +} + } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl diff --git a/TMessagesProj/jni/voip/webrtc/absl/base/internal/sysinfo.h b/TMessagesProj/jni/voip/webrtc/absl/base/internal/sysinfo.h index 7246d5dd9..119cf1f0e 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/base/internal/sysinfo.h +++ b/TMessagesProj/jni/voip/webrtc/absl/base/internal/sysinfo.h @@ -30,6 +30,7 @@ #include +#include "absl/base/config.h" #include "absl/base/port.h" namespace absl { @@ -59,6 +60,13 @@ using pid_t = uint32_t; #endif pid_t GetTID(); +// Like GetTID(), but caches the result in thread-local storage in order +// to avoid unnecessary system calls. Note that there are some cases where +// one must call through to GetTID directly, which is why this exists as a +// separate function. For example, GetCachedTID() is not safe to call in +// an asynchronous signal-handling context nor right after a call to fork(). +pid_t GetCachedTID(); + } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl diff --git a/TMessagesProj/jni/voip/webrtc/absl/base/internal/thread_annotations.h b/TMessagesProj/jni/voip/webrtc/absl/base/internal/thread_annotations.h new file mode 100644 index 000000000..4dab6a9c1 --- /dev/null +++ b/TMessagesProj/jni/voip/webrtc/absl/base/internal/thread_annotations.h @@ -0,0 +1,271 @@ +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: thread_annotations.h +// ----------------------------------------------------------------------------- +// +// WARNING: This is a backwards compatible header and it will be removed after +// the migration to prefixed thread annotations is finished; please include +// "absl/base/thread_annotations.h". +// +// This header file contains macro definitions for thread safety annotations +// that allow developers to document the locking policies of multi-threaded +// code. The annotations can also help program analysis tools to identify +// potential thread safety issues. +// +// These annotations are implemented using compiler attributes. Using the macros +// defined here instead of raw attributes allow for portability and future +// compatibility. +// +// When referring to mutexes in the arguments of the attributes, you should +// use variable names or more complex expressions (e.g. my_object->mutex_) +// that evaluate to a concrete mutex object whenever possible. If the mutex +// you want to refer to is not in scope, you may use a member pointer +// (e.g. &MyClass::mutex_) to refer to a mutex in some (unknown) object. 
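// For example (a sketch; `Cache` and `Registry` are illustrative names):
//
//   class Cache {
//     // The protecting mutex lives in another object that is not in scope here.
//     int hits_ GUARDED_BY(&Registry::mu_);
//   };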
+ +#ifndef ABSL_BASE_INTERNAL_THREAD_ANNOTATIONS_H_ +#define ABSL_BASE_INTERNAL_THREAD_ANNOTATIONS_H_ + +#if defined(__clang__) +#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x)) +#else +#define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op +#endif + +// GUARDED_BY() +// +// Documents if a shared field or global variable needs to be protected by a +// mutex. GUARDED_BY() allows the user to specify a particular mutex that +// should be held when accessing the annotated variable. +// +// Although this annotation (and PT_GUARDED_BY, below) cannot be applied to +// local variables, a local variable and its associated mutex can often be +// combined into a small class or struct, thereby allowing the annotation. +// +// Example: +// +// class Foo { +// Mutex mu_; +// int p1_ GUARDED_BY(mu_); +// ... +// }; +#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x)) + +// PT_GUARDED_BY() +// +// Documents if the memory location pointed to by a pointer should be guarded +// by a mutex when dereferencing the pointer. +// +// Example: +// class Foo { +// Mutex mu_; +// int *p1_ PT_GUARDED_BY(mu_); +// ... +// }; +// +// Note that a pointer variable to a shared memory location could itself be a +// shared variable. +// +// Example: +// +// // `q_`, guarded by `mu1_`, points to a shared memory location that is +// // guarded by `mu2_`: +// int *q_ GUARDED_BY(mu1_) PT_GUARDED_BY(mu2_); +#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x)) + +// ACQUIRED_AFTER() / ACQUIRED_BEFORE() +// +// Documents the acquisition order between locks that can be held +// simultaneously by a thread. For any two locks that need to be annotated +// to establish an acquisition order, only one of them needs the annotation. +// (i.e. You don't have to annotate both locks with both ACQUIRED_AFTER +// and ACQUIRED_BEFORE.) +// +// As with GUARDED_BY, this is only applicable to mutexes that are shared +// fields or global variables. +// +// Example: +// +// Mutex m1_; +// Mutex m2_ ACQUIRED_AFTER(m1_); +#define ACQUIRED_AFTER(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__)) + +#define ACQUIRED_BEFORE(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__)) + +// EXCLUSIVE_LOCKS_REQUIRED() / SHARED_LOCKS_REQUIRED() +// +// Documents a function that expects a mutex to be held prior to entry. +// The mutex is expected to be held both on entry to, and exit from, the +// function. +// +// An exclusive lock allows read-write access to the guarded data member(s), and +// only one thread can acquire a lock exclusively at any one time. A shared lock +// allows read-only access, and any number of threads can acquire a shared lock +// concurrently. +// +// Generally, non-const methods should be annotated with +// EXCLUSIVE_LOCKS_REQUIRED, while const methods should be annotated with +// SHARED_LOCKS_REQUIRED. +// +// Example: +// +// Mutex mu1, mu2; +// int a GUARDED_BY(mu1); +// int b GUARDED_BY(mu2); +// +// void foo() EXCLUSIVE_LOCKS_REQUIRED(mu1, mu2) { ... } +// void bar() const SHARED_LOCKS_REQUIRED(mu1, mu2) { ... } +#define EXCLUSIVE_LOCKS_REQUIRED(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(__VA_ARGS__)) + +#define SHARED_LOCKS_REQUIRED(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(shared_locks_required(__VA_ARGS__)) + +// LOCKS_EXCLUDED() +// +// Documents the locks acquired in the body of the function. These locks +// cannot be held when calling this function (as Abseil's `Mutex` locks are +// non-reentrant). +#define LOCKS_EXCLUDED(...) 
\ + THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__)) + +// LOCK_RETURNED() +// +// Documents a function that returns a mutex without acquiring it. For example, +// a public getter method that returns a pointer to a private mutex should +// be annotated with LOCK_RETURNED. +#define LOCK_RETURNED(x) \ + THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x)) + +// LOCKABLE +// +// Documents if a class/type is a lockable type (such as the `Mutex` class). +#define LOCKABLE \ + THREAD_ANNOTATION_ATTRIBUTE__(lockable) + +// SCOPED_LOCKABLE +// +// Documents if a class does RAII locking (such as the `MutexLock` class). +// The constructor should use `LOCK_FUNCTION()` to specify the mutex that is +// acquired, and the destructor should use `UNLOCK_FUNCTION()` with no +// arguments; the analysis will assume that the destructor unlocks whatever the +// constructor locked. +#define SCOPED_LOCKABLE \ + THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable) + +// EXCLUSIVE_LOCK_FUNCTION() +// +// Documents functions that acquire a lock in the body of a function, and do +// not release it. +#define EXCLUSIVE_LOCK_FUNCTION(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock_function(__VA_ARGS__)) + +// SHARED_LOCK_FUNCTION() +// +// Documents functions that acquire a shared (reader) lock in the body of a +// function, and do not release it. +#define SHARED_LOCK_FUNCTION(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(shared_lock_function(__VA_ARGS__)) + +// UNLOCK_FUNCTION() +// +// Documents functions that expect a lock to be held on entry to the function, +// and release it in the body of the function. +#define UNLOCK_FUNCTION(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(unlock_function(__VA_ARGS__)) + +// EXCLUSIVE_TRYLOCK_FUNCTION() / SHARED_TRYLOCK_FUNCTION() +// +// Documents functions that try to acquire a lock, and return success or failure +// (or a non-boolean value that can be interpreted as a boolean). +// The first argument should be `true` for functions that return `true` on +// success, or `false` for functions that return `false` on success. The second +// argument specifies the mutex that is locked on success. If unspecified, this +// mutex is assumed to be `this`. +#define EXCLUSIVE_TRYLOCK_FUNCTION(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(exclusive_trylock_function(__VA_ARGS__)) + +#define SHARED_TRYLOCK_FUNCTION(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock_function(__VA_ARGS__)) + +// ASSERT_EXCLUSIVE_LOCK() / ASSERT_SHARED_LOCK() +// +// Documents functions that dynamically check to see if a lock is held, and fail +// if it is not held. +#define ASSERT_EXCLUSIVE_LOCK(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(assert_exclusive_lock(__VA_ARGS__)) + +#define ASSERT_SHARED_LOCK(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_lock(__VA_ARGS__)) + +// NO_THREAD_SAFETY_ANALYSIS +// +// Turns off thread safety checking within the body of a particular function. +// This annotation is used to mark functions that are known to be correct, but +// the locking behavior is more complicated than the analyzer can handle. +#define NO_THREAD_SAFETY_ANALYSIS \ + THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis) + +//------------------------------------------------------------------------------ +// Tool-Supplied Annotations +//------------------------------------------------------------------------------ + +// TS_UNCHECKED should be placed around lock expressions that are not valid +// C++ syntax, but which are present for documentation purposes. These +// annotations will be ignored by the analysis. 
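// For example (a sketch; the quoted expression is illustrative):
//
//   int pending_ GUARDED_BY(TS_UNCHECKED("the owning shard's mutex"));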
+#define TS_UNCHECKED(x) "" + +// TS_FIXME is used to mark lock expressions that are not valid C++ syntax. +// It is used by automated tools to mark and disable invalid expressions. +// The annotation should either be fixed, or changed to TS_UNCHECKED. +#define TS_FIXME(x) "" + +// Like NO_THREAD_SAFETY_ANALYSIS, this turns off checking within the body of +// a particular function. However, this attribute is used to mark functions +// that are incorrect and need to be fixed. It is used by automated tools to +// avoid breaking the build when the analysis is updated. +// Code owners are expected to eventually fix the routine. +#define NO_THREAD_SAFETY_ANALYSIS_FIXME NO_THREAD_SAFETY_ANALYSIS + +// Similar to NO_THREAD_SAFETY_ANALYSIS_FIXME, this macro marks a GUARDED_BY +// annotation that needs to be fixed, because it is producing thread safety +// warning. It disables the GUARDED_BY. +#define GUARDED_BY_FIXME(x) + +// Disables warnings for a single read operation. This can be used to avoid +// warnings when it is known that the read is not actually involved in a race, +// but the compiler cannot confirm that. +#define TS_UNCHECKED_READ(x) thread_safety_analysis::ts_unchecked_read(x) + + +namespace thread_safety_analysis { + +// Takes a reference to a guarded data member, and returns an unguarded +// reference. +template +inline const T& ts_unchecked_read(const T& v) NO_THREAD_SAFETY_ANALYSIS { + return v; +} + +template +inline T& ts_unchecked_read(T& v) NO_THREAD_SAFETY_ANALYSIS { + return v; +} + +} // namespace thread_safety_analysis + +#endif // ABSL_BASE_INTERNAL_THREAD_ANNOTATIONS_H_ diff --git a/TMessagesProj/jni/voip/webrtc/absl/base/internal/thread_identity.cc b/TMessagesProj/jni/voip/webrtc/absl/base/internal/thread_identity.cc index d63a04ae9..9950e63a7 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/base/internal/thread_identity.cc +++ b/TMessagesProj/jni/voip/webrtc/absl/base/internal/thread_identity.cc @@ -23,6 +23,7 @@ #include #include +#include "absl/base/attributes.h" #include "absl/base/call_once.h" #include "absl/base/internal/raw_logging.h" #include "absl/base/internal/spinlock.h" @@ -53,9 +54,11 @@ void AllocateThreadIdentityKey(ThreadIdentityReclaimerFunction reclaimer) { // exist within a process (via dlopen() or similar), references to // thread_identity_ptr from each instance of the code will refer to // *different* instances of this ptr. -#ifdef __GNUC__ +// Apple platforms have the visibility attribute, but issue a compile warning +// that protected visibility is unsupported. +#if ABSL_HAVE_ATTRIBUTE(visibility) && !defined(__APPLE__) __attribute__((visibility("protected"))) -#endif // __GNUC__ +#endif // ABSL_HAVE_ATTRIBUTE(visibility) && !defined(__APPLE__) #if ABSL_PER_THREAD_TLS // Prefer __thread to thread_local as benchmarks indicate it is a bit faster. ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity* thread_identity_ptr = nullptr; @@ -117,10 +120,10 @@ void SetCurrentThreadIdentity( ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11 // Please see the comment on `CurrentThreadIdentityIfPresent` in -// thread_identity.h. Because DLLs cannot expose thread_local variables in -// headers, we opt for the correct-but-slower option of placing the definition -// of this function only in a translation unit inside DLL. -#if defined(ABSL_BUILD_DLL) || defined(ABSL_CONSUME_DLL) +// thread_identity.h. When we cannot expose thread_local variables in +// headers, we opt for the correct-but-slower option of not inlining this +// function. 
+#ifndef ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT ThreadIdentity* CurrentThreadIdentityIfPresent() { return thread_identity_ptr; } #endif #endif diff --git a/TMessagesProj/jni/voip/webrtc/absl/base/internal/thread_identity.h b/TMessagesProj/jni/voip/webrtc/absl/base/internal/thread_identity.h index ceb109b41..659694b32 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/base/internal/thread_identity.h +++ b/TMessagesProj/jni/voip/webrtc/absl/base/internal/thread_identity.h @@ -32,6 +32,7 @@ #include "absl/base/config.h" #include "absl/base/internal/per_thread_tls.h" +#include "absl/base/optimization.h" namespace absl { ABSL_NAMESPACE_BEGIN @@ -69,30 +70,28 @@ struct PerThreadSynch { // is using this PerThreadSynch as a terminator. Its // skip field must not be filled in because the loop // might then skip over the terminator. - - // The wait parameters of the current wait. waitp is null if the - // thread is not waiting. Transitions from null to non-null must - // occur before the enqueue commit point (state = kQueued in - // Enqueue() and CondVarEnqueue()). Transitions from non-null to - // null must occur after the wait is finished (state = kAvailable in - // Mutex::Block() and CondVar::WaitCommon()). This field may be - // changed only by the thread that describes this PerThreadSynch. A - // special case is Fer(), which calls Enqueue() on another thread, - // but with an identical SynchWaitParams pointer, thus leaving the - // pointer unchanged. - SynchWaitParams *waitp; - - bool suppress_fatal_errors; // If true, try to proceed even in the face of - // broken invariants. This is used within fatal - // signal handlers to improve the chances of - // debug logging information being output - // successfully. - - intptr_t readers; // Number of readers in mutex. - int priority; // Priority of thread (updated every so often). - - // When priority will next be read (cycles). - int64_t next_priority_read_cycles; + bool wake; // This thread is to be woken from a Mutex. + // If "x" is on a waiter list for a mutex, "x->cond_waiter" is true iff the + // waiter is waiting on the mutex as part of a CV Wait or Mutex Await. + // + // The value of "x->cond_waiter" is meaningless if "x" is not on a + // Mutex waiter list. + bool cond_waiter; + bool maybe_unlocking; // Valid at head of Mutex waiter queue; + // true if UnlockSlow could be searching + // for a waiter to wake. Used for an optimization + // in Enqueue(). true is always a valid value. + // Can be reset to false when the unlocker or any + // writer releases the lock, or a reader fully + // releases the lock. It may not be set to false + // by a reader that decrements the count to + // non-zero. protected by mutex spinlock + bool suppress_fatal_errors; // If true, try to proceed even in the face + // of broken invariants. This is used within + // fatal signal handlers to improve the + // chances of debug logging information being + // output successfully. + int priority; // Priority of thread (updated every so often). // State values: // kAvailable: This PerThreadSynch is available. @@ -111,30 +110,30 @@ struct PerThreadSynch { }; std::atomic state; - bool maybe_unlocking; // Valid at head of Mutex waiter queue; - // true if UnlockSlow could be searching - // for a waiter to wake. Used for an optimization - // in Enqueue(). true is always a valid value. - // Can be reset to false when the unlocker or any - // writer releases the lock, or a reader fully releases - // the lock. 
It may not be set to false by a reader - // that decrements the count to non-zero. - // protected by mutex spinlock + // The wait parameters of the current wait. waitp is null if the + // thread is not waiting. Transitions from null to non-null must + // occur before the enqueue commit point (state = kQueued in + // Enqueue() and CondVarEnqueue()). Transitions from non-null to + // null must occur after the wait is finished (state = kAvailable in + // Mutex::Block() and CondVar::WaitCommon()). This field may be + // changed only by the thread that describes this PerThreadSynch. A + // special case is Fer(), which calls Enqueue() on another thread, + // but with an identical SynchWaitParams pointer, thus leaving the + // pointer unchanged. + SynchWaitParams* waitp; - bool wake; // This thread is to be woken from a Mutex. + intptr_t readers; // Number of readers in mutex. - // If "x" is on a waiter list for a mutex, "x->cond_waiter" is true iff the - // waiter is waiting on the mutex as part of a CV Wait or Mutex Await. - // - // The value of "x->cond_waiter" is meaningless if "x" is not on a - // Mutex waiter list. - bool cond_waiter; + // When priority will next be read (cycles). + int64_t next_priority_read_cycles; // Locks held; used during deadlock detection. // Allocated in Synch_GetAllLocks() and freed in ReclaimThreadIdentity(). SynchLocksHeld *all_locks; }; +// The instances of this class are allocated in NewThreadIdentity() with an +// alignment of PerThreadSynch::kAlignment. struct ThreadIdentity { // Must be the first member. The Mutex implementation requires that // the PerThreadSynch object associated with each thread is @@ -144,7 +143,7 @@ struct ThreadIdentity { // Private: Reserved for absl::synchronization_internal::Waiter. struct WaiterState { - char data[128]; + alignas(void*) char data[128]; } waiter_state; // Used by PerThreadSem::{Get,Set}ThreadBlockedCounter(). @@ -189,30 +188,32 @@ void ClearCurrentThreadIdentity(); // May be chosen at compile time via: -DABSL_FORCE_THREAD_IDENTITY_MODE= #ifdef ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC -#error ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC cannot be direcly set +#error ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC cannot be directly set #else #define ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC 0 #endif #ifdef ABSL_THREAD_IDENTITY_MODE_USE_TLS -#error ABSL_THREAD_IDENTITY_MODE_USE_TLS cannot be direcly set +#error ABSL_THREAD_IDENTITY_MODE_USE_TLS cannot be directly set #else #define ABSL_THREAD_IDENTITY_MODE_USE_TLS 1 #endif #ifdef ABSL_THREAD_IDENTITY_MODE_USE_CPP11 -#error ABSL_THREAD_IDENTITY_MODE_USE_CPP11 cannot be direcly set +#error ABSL_THREAD_IDENTITY_MODE_USE_CPP11 cannot be directly set #else #define ABSL_THREAD_IDENTITY_MODE_USE_CPP11 2 #endif #ifdef ABSL_THREAD_IDENTITY_MODE -#error ABSL_THREAD_IDENTITY_MODE cannot be direcly set +#error ABSL_THREAD_IDENTITY_MODE cannot be directly set #elif defined(ABSL_FORCE_THREAD_IDENTITY_MODE) #define ABSL_THREAD_IDENTITY_MODE ABSL_FORCE_THREAD_IDENTITY_MODE #elif defined(_WIN32) && !defined(__MINGW32__) #define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11 -#elif ABSL_PER_THREAD_TLS && defined(__GOOGLE_GRTE_VERSION__) && \ +#elif defined(__APPLE__) && defined(ABSL_HAVE_THREAD_LOCAL) +#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11 +#elif ABSL_PER_THREAD_TLS && defined(__GOOGLE_GRTE_VERSION__) && \ (__GOOGLE_GRTE_VERSION__ >= 20140228L) // Support for async-safe TLS was specifically added in GRTEv4. 
It's not // present in the upstream eglibc. @@ -235,13 +236,18 @@ ABSL_CONST_INIT extern thread_local ThreadIdentity* thread_identity_ptr; #error Thread-local storage not detected on this platform #endif -// thread_local variables cannot be in headers exposed by DLLs. However, it is -// important for performance reasons in general that -// `CurrentThreadIdentityIfPresent` be inlined. This is not possible across a -// DLL boundary so, with DLLs, we opt to have the function not be inlined. Note +// thread_local variables cannot be in headers exposed by DLLs or in certain +// build configurations on Apple platforms. However, it is important for +// performance reasons in general that `CurrentThreadIdentityIfPresent` be +// inlined. In the other cases we opt to have the function not be inlined. Note // that `CurrentThreadIdentityIfPresent` is declared above so we can exclude -// this entire inline definition when compiling as a DLL. -#if !defined(ABSL_BUILD_DLL) && !defined(ABSL_CONSUME_DLL) +// this entire inline definition. +#if !defined(__APPLE__) && !defined(ABSL_BUILD_DLL) && \ + !defined(ABSL_CONSUME_DLL) +#define ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT 1 +#endif + +#ifdef ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT inline ThreadIdentity* CurrentThreadIdentityIfPresent() { return thread_identity_ptr; } diff --git a/TMessagesProj/jni/voip/webrtc/absl/base/internal/throw_delegate.cc b/TMessagesProj/jni/voip/webrtc/absl/base/internal/throw_delegate.cc index c055f75d9..c260ff1ee 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/base/internal/throw_delegate.cc +++ b/TMessagesProj/jni/voip/webrtc/absl/base/internal/throw_delegate.cc @@ -18,6 +18,7 @@ #include #include #include + #include "absl/base/config.h" #include "absl/base/internal/raw_logging.h" @@ -25,83 +26,186 @@ namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { +// NOTE: The various STL exception throwing functions are placed within the +// #ifdef blocks so the symbols aren't exposed on platforms that don't support +// them, such as the Android NDK. For example, ANGLE fails to link when building +// within AOSP without them, since the STL functions don't exist. 
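For context on how these delegates are consumed, a hypothetical caller is sketched below (the At() helper is illustrative only; the header path and namespace follow Abseil's usual layout). Delegating the throw keeps the exception-vs-abort decision made in this hunk in a single translation unit.

```
#include <cstddef>
#include <vector>

#include "absl/base/internal/throw_delegate.h"

// Hypothetical bounds-checked accessor built on the delegate functions.
int At(const std::vector<int>& v, size_t i) {
  if (i >= v.size()) {
    // Throws std::out_of_range when exceptions are available; otherwise the
    // delegate logs fatally and aborts, as in the #else branches in this hunk.
    absl::base_internal::ThrowStdOutOfRange("At(): index out of range");
  }
  return v[i];
}
```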
namespace { +#ifdef ABSL_HAVE_EXCEPTIONS template [[noreturn]] void Throw(const T& error) { -#ifdef ABSL_HAVE_EXCEPTIONS throw error; -#else - ABSL_RAW_LOG(FATAL, "%s", error.what()); - std::abort(); -#endif } +#endif } // namespace void ThrowStdLogicError(const std::string& what_arg) { +#ifdef ABSL_HAVE_EXCEPTIONS Throw(std::logic_error(what_arg)); +#else + ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str()); + std::abort(); +#endif } void ThrowStdLogicError(const char* what_arg) { +#ifdef ABSL_HAVE_EXCEPTIONS Throw(std::logic_error(what_arg)); +#else + ABSL_RAW_LOG(FATAL, "%s", what_arg); + std::abort(); +#endif } void ThrowStdInvalidArgument(const std::string& what_arg) { +#ifdef ABSL_HAVE_EXCEPTIONS Throw(std::invalid_argument(what_arg)); +#else + ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str()); + std::abort(); +#endif } void ThrowStdInvalidArgument(const char* what_arg) { +#ifdef ABSL_HAVE_EXCEPTIONS Throw(std::invalid_argument(what_arg)); +#else + ABSL_RAW_LOG(FATAL, "%s", what_arg); + std::abort(); +#endif } void ThrowStdDomainError(const std::string& what_arg) { +#ifdef ABSL_HAVE_EXCEPTIONS Throw(std::domain_error(what_arg)); +#else + ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str()); + std::abort(); +#endif } void ThrowStdDomainError(const char* what_arg) { +#ifdef ABSL_HAVE_EXCEPTIONS Throw(std::domain_error(what_arg)); +#else + ABSL_RAW_LOG(FATAL, "%s", what_arg); + std::abort(); +#endif } void ThrowStdLengthError(const std::string& what_arg) { +#ifdef ABSL_HAVE_EXCEPTIONS Throw(std::length_error(what_arg)); +#else + ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str()); + std::abort(); +#endif } void ThrowStdLengthError(const char* what_arg) { +#ifdef ABSL_HAVE_EXCEPTIONS Throw(std::length_error(what_arg)); +#else + ABSL_RAW_LOG(FATAL, "%s", what_arg); + std::abort(); +#endif } void ThrowStdOutOfRange(const std::string& what_arg) { +#ifdef ABSL_HAVE_EXCEPTIONS Throw(std::out_of_range(what_arg)); +#else + ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str()); + std::abort(); +#endif } void ThrowStdOutOfRange(const char* what_arg) { +#ifdef ABSL_HAVE_EXCEPTIONS Throw(std::out_of_range(what_arg)); +#else + ABSL_RAW_LOG(FATAL, "%s", what_arg); + std::abort(); +#endif } void ThrowStdRuntimeError(const std::string& what_arg) { +#ifdef ABSL_HAVE_EXCEPTIONS Throw(std::runtime_error(what_arg)); +#else + ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str()); + std::abort(); +#endif } void ThrowStdRuntimeError(const char* what_arg) { +#ifdef ABSL_HAVE_EXCEPTIONS Throw(std::runtime_error(what_arg)); +#else + ABSL_RAW_LOG(FATAL, "%s", what_arg); + std::abort(); +#endif } void ThrowStdRangeError(const std::string& what_arg) { +#ifdef ABSL_HAVE_EXCEPTIONS Throw(std::range_error(what_arg)); +#else + ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str()); + std::abort(); +#endif } void ThrowStdRangeError(const char* what_arg) { +#ifdef ABSL_HAVE_EXCEPTIONS Throw(std::range_error(what_arg)); +#else + ABSL_RAW_LOG(FATAL, "%s", what_arg); + std::abort(); +#endif } void ThrowStdOverflowError(const std::string& what_arg) { +#ifdef ABSL_HAVE_EXCEPTIONS Throw(std::overflow_error(what_arg)); +#else + ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str()); + std::abort(); +#endif } void ThrowStdOverflowError(const char* what_arg) { +#ifdef ABSL_HAVE_EXCEPTIONS Throw(std::overflow_error(what_arg)); +#else + ABSL_RAW_LOG(FATAL, "%s", what_arg); + std::abort(); +#endif } void ThrowStdUnderflowError(const std::string& what_arg) { +#ifdef ABSL_HAVE_EXCEPTIONS Throw(std::underflow_error(what_arg)); +#else + ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str()); + std::abort(); +#endif } 
void ThrowStdUnderflowError(const char* what_arg) { +#ifdef ABSL_HAVE_EXCEPTIONS Throw(std::underflow_error(what_arg)); +#else + ABSL_RAW_LOG(FATAL, "%s", what_arg); + std::abort(); +#endif } -void ThrowStdBadFunctionCall() { Throw(std::bad_function_call()); } +void ThrowStdBadFunctionCall() { +#ifdef ABSL_HAVE_EXCEPTIONS + Throw(std::bad_function_call()); +#else + std::abort(); +#endif +} -void ThrowStdBadAlloc() { Throw(std::bad_alloc()); } +void ThrowStdBadAlloc() { +#ifdef ABSL_HAVE_EXCEPTIONS + Throw(std::bad_alloc()); +#else + std::abort(); +#endif +} } // namespace base_internal ABSL_NAMESPACE_END diff --git a/TMessagesProj/jni/voip/webrtc/absl/base/internal/tsan_mutex_interface.h b/TMessagesProj/jni/voip/webrtc/absl/base/internal/tsan_mutex_interface.h index 2a510603b..39207d8a5 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/base/internal/tsan_mutex_interface.h +++ b/TMessagesProj/jni/voip/webrtc/absl/base/internal/tsan_mutex_interface.h @@ -19,6 +19,8 @@ #ifndef ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_ #define ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_ +#include "absl/base/config.h" + // ABSL_INTERNAL_HAVE_TSAN_INTERFACE // Macro intended only for internal use. // @@ -28,7 +30,7 @@ #error "ABSL_INTERNAL_HAVE_TSAN_INTERFACE cannot be directly set." #endif -#if defined(THREAD_SANITIZER) && defined(__has_include) +#if defined(ABSL_HAVE_THREAD_SANITIZER) && defined(__has_include) #if __has_include() #define ABSL_INTERNAL_HAVE_TSAN_INTERFACE 1 #endif diff --git a/TMessagesProj/jni/voip/webrtc/absl/base/internal/unaligned_access.h b/TMessagesProj/jni/voip/webrtc/absl/base/internal/unaligned_access.h index 6be56c865..093dd9b49 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/base/internal/unaligned_access.h +++ b/TMessagesProj/jni/voip/webrtc/absl/base/internal/unaligned_access.h @@ -31,80 +31,6 @@ // The unaligned API is C++ only. The declarations use C++ features // (namespaces, inline) which are absent or incompatible in C. #if defined(__cplusplus) - -#if defined(ADDRESS_SANITIZER) || defined(THREAD_SANITIZER) ||\ - defined(MEMORY_SANITIZER) -// Consider we have an unaligned load/store of 4 bytes from address 0x...05. -// AddressSanitizer will treat it as a 3-byte access to the range 05:07 and -// will miss a bug if 08 is the first unaddressable byte. -// ThreadSanitizer will also treat this as a 3-byte access to 05:07 and will -// miss a race between this access and some other accesses to 08. -// MemorySanitizer will correctly propagate the shadow on unaligned stores -// and correctly report bugs on unaligned loads, but it may not properly -// update and report the origin of the uninitialized memory. -// For all three tools, replacing an unaligned access with a tool-specific -// callback solves the problem. - -// Make sure uint16_t/uint32_t/uint64_t are defined. 
-#include - -extern "C" { -uint16_t __sanitizer_unaligned_load16(const void *p); -uint32_t __sanitizer_unaligned_load32(const void *p); -uint64_t __sanitizer_unaligned_load64(const void *p); -void __sanitizer_unaligned_store16(void *p, uint16_t v); -void __sanitizer_unaligned_store32(void *p, uint32_t v); -void __sanitizer_unaligned_store64(void *p, uint64_t v); -} // extern "C" - -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace base_internal { - -inline uint16_t UnalignedLoad16(const void *p) { - return __sanitizer_unaligned_load16(p); -} - -inline uint32_t UnalignedLoad32(const void *p) { - return __sanitizer_unaligned_load32(p); -} - -inline uint64_t UnalignedLoad64(const void *p) { - return __sanitizer_unaligned_load64(p); -} - -inline void UnalignedStore16(void *p, uint16_t v) { - __sanitizer_unaligned_store16(p, v); -} - -inline void UnalignedStore32(void *p, uint32_t v) { - __sanitizer_unaligned_store32(p, v); -} - -inline void UnalignedStore64(void *p, uint64_t v) { - __sanitizer_unaligned_store64(p, v); -} - -} // namespace base_internal -ABSL_NAMESPACE_END -} // namespace absl - -#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) \ - (absl::base_internal::UnalignedLoad16(_p)) -#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) \ - (absl::base_internal::UnalignedLoad32(_p)) -#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) \ - (absl::base_internal::UnalignedLoad64(_p)) - -#define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \ - (absl::base_internal::UnalignedStore16(_p, _val)) -#define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \ - (absl::base_internal::UnalignedStore32(_p, _val)) -#define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \ - (absl::base_internal::UnalignedStore64(_p, _val)) - -#else - namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { @@ -151,8 +77,6 @@ ABSL_NAMESPACE_END #define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \ (absl::base_internal::UnalignedStore64(_p, _val)) -#endif - #endif // defined(__cplusplus), end of unaligned API #endif // ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_ diff --git a/TMessagesProj/jni/voip/webrtc/absl/base/internal/unscaledcycleclock.cc b/TMessagesProj/jni/voip/webrtc/absl/base/internal/unscaledcycleclock.cc index f1e7bbef8..4d352bd11 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/base/internal/unscaledcycleclock.cc +++ b/TMessagesProj/jni/voip/webrtc/absl/base/internal/unscaledcycleclock.cc @@ -87,6 +87,10 @@ int64_t UnscaledCycleClock::Now() { double UnscaledCycleClock::Frequency() { #ifdef __GLIBC__ return __ppc_get_timebase_freq(); +#elif defined(_AIX) + // This is the same constant value as returned by + // __ppc_get_timebase_freq(). 
+ return static_cast(512000000); #elif defined(__FreeBSD__) static once_flag init_timebase_frequency_once; static double timebase_frequency = 0.0; @@ -119,13 +123,23 @@ double UnscaledCycleClock::Frequency() { return aarch64_timer_frequency; } +#elif defined(__riscv) + +int64_t UnscaledCycleClock::Now() { + int64_t virtual_timer_value; + asm volatile("rdcycle %0" : "=r"(virtual_timer_value)); + return virtual_timer_value; +} + +double UnscaledCycleClock::Frequency() { + return base_internal::NominalCPUFrequency(); +} + #elif defined(_M_IX86) || defined(_M_X64) #pragma intrinsic(__rdtsc) -int64_t UnscaledCycleClock::Now() { - return __rdtsc(); -} +int64_t UnscaledCycleClock::Now() { return __rdtsc(); } double UnscaledCycleClock::Frequency() { return base_internal::NominalCPUFrequency(); diff --git a/TMessagesProj/jni/voip/webrtc/absl/base/internal/unscaledcycleclock.h b/TMessagesProj/jni/voip/webrtc/absl/base/internal/unscaledcycleclock.h index cdce9bf8a..681ff8f99 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/base/internal/unscaledcycleclock.h +++ b/TMessagesProj/jni/voip/webrtc/absl/base/internal/unscaledcycleclock.h @@ -15,8 +15,8 @@ // UnscaledCycleClock // An UnscaledCycleClock yields the value and frequency of a cycle counter // that increments at a rate that is approximately constant. -// This class is for internal / whitelisted use only, you should consider -// using CycleClock instead. +// This class is for internal use only, you should consider using CycleClock +// instead. // // Notes: // The cycle counter frequency is not necessarily the core clock frequency. @@ -46,8 +46,8 @@ // The following platforms have an implementation of a hardware counter. #if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || \ - defined(__powerpc__) || defined(__ppc__) || \ - defined(_M_IX86) || defined(_M_X64) + defined(__powerpc__) || defined(__ppc__) || defined(__riscv) || \ + defined(_M_IX86) || defined(_M_X64) #define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 1 #else #define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 0 @@ -80,8 +80,8 @@ // This macro can be used to test if UnscaledCycleClock::Frequency() // is NominalCPUFrequency() on a particular platform. -#if (defined(__i386__) || defined(__x86_64__) || \ - defined(_M_IX86) || defined(_M_X64)) +#if (defined(__i386__) || defined(__x86_64__) || defined(__riscv) || \ + defined(_M_IX86) || defined(_M_X64)) #define ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY #endif @@ -109,7 +109,7 @@ class UnscaledCycleClock { // value. static double Frequency(); - // Whitelisted friends. 
+ // Allowed users friend class base_internal::CycleClock; friend class time_internal::UnscaledCycleClockWrapperForGetCurrentTime; friend class base_internal::UnscaledCycleClockWrapperForInitializeFrequency; diff --git a/TMessagesProj/jni/voip/webrtc/absl/base/log_severity.cc b/TMessagesProj/jni/voip/webrtc/absl/base/log_severity.cc index 72312afd3..de26b06e4 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/base/log_severity.cc +++ b/TMessagesProj/jni/voip/webrtc/absl/base/log_severity.cc @@ -23,5 +23,31 @@ std::ostream& operator<<(std::ostream& os, absl::LogSeverity s) { if (s == absl::NormalizeLogSeverity(s)) return os << absl::LogSeverityName(s); return os << "absl::LogSeverity(" << static_cast(s) << ")"; } + +std::ostream& operator<<(std::ostream& os, absl::LogSeverityAtLeast s) { + switch (s) { + case absl::LogSeverityAtLeast::kInfo: + case absl::LogSeverityAtLeast::kWarning: + case absl::LogSeverityAtLeast::kError: + case absl::LogSeverityAtLeast::kFatal: + return os << ">=" << static_cast(s); + case absl::LogSeverityAtLeast::kInfinity: + return os << "INFINITY"; + } + return os; +} + +std::ostream& operator<<(std::ostream& os, absl::LogSeverityAtMost s) { + switch (s) { + case absl::LogSeverityAtMost::kInfo: + case absl::LogSeverityAtMost::kWarning: + case absl::LogSeverityAtMost::kError: + case absl::LogSeverityAtMost::kFatal: + return os << "<=" << static_cast(s); + case absl::LogSeverityAtMost::kNegativeInfinity: + return os << "NEGATIVE_INFINITY"; + } + return os; +} ABSL_NAMESPACE_END } // namespace absl diff --git a/TMessagesProj/jni/voip/webrtc/absl/base/log_severity.h b/TMessagesProj/jni/voip/webrtc/absl/base/log_severity.h index 65a3b1667..8bdca38b5 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/base/log_severity.h +++ b/TMessagesProj/jni/voip/webrtc/absl/base/log_severity.h @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -#ifndef ABSL_BASE_INTERNAL_LOG_SEVERITY_H_ -#define ABSL_BASE_INTERNAL_LOG_SEVERITY_H_ +#ifndef ABSL_BASE_LOG_SEVERITY_H_ +#define ABSL_BASE_LOG_SEVERITY_H_ #include #include @@ -36,7 +36,7 @@ ABSL_NAMESPACE_BEGIN // such values to a defined severity level, however in some cases values other // than the defined levels are useful for comparison. // -// Exmaple: +// Example: // // // Effectively disables all logging: // SetMinLogLevel(static_cast(100)); @@ -115,7 +115,58 @@ constexpr absl::LogSeverity NormalizeLogSeverity(int s) { // unspecified; do not rely on it. std::ostream& operator<<(std::ostream& os, absl::LogSeverity s); +// Enums representing a lower bound for LogSeverity. APIs that only operate on +// messages of at least a certain level (for example, `SetMinLogLevel()`) use +// this type to specify that level. absl::LogSeverityAtLeast::kInfinity is +// a level above all threshold levels and therefore no log message will +// ever meet this threshold. +enum class LogSeverityAtLeast : int { + kInfo = static_cast(absl::LogSeverity::kInfo), + kWarning = static_cast(absl::LogSeverity::kWarning), + kError = static_cast(absl::LogSeverity::kError), + kFatal = static_cast(absl::LogSeverity::kFatal), + kInfinity = 1000, +}; + +std::ostream& operator<<(std::ostream& os, absl::LogSeverityAtLeast s); + +// Enums representing an upper bound for LogSeverity. APIs that only operate on +// messages of at most a certain level (for example, buffer all messages at or +// below a certain level) use this type to specify that level. 
+// absl::LogSeverityAtMost::kNegativeInfinity is a level below all threshold +// levels and therefore will exclude all log messages. +enum class LogSeverityAtMost : int { + kNegativeInfinity = -1000, + kInfo = static_cast(absl::LogSeverity::kInfo), + kWarning = static_cast(absl::LogSeverity::kWarning), + kError = static_cast(absl::LogSeverity::kError), + kFatal = static_cast(absl::LogSeverity::kFatal), +}; + +std::ostream& operator<<(std::ostream& os, absl::LogSeverityAtMost s); + +#define COMPOP(op1, op2, T) \ + constexpr bool operator op1(absl::T lhs, absl::LogSeverity rhs) { \ + return static_cast(lhs) op1 rhs; \ + } \ + constexpr bool operator op2(absl::LogSeverity lhs, absl::T rhs) { \ + return lhs op2 static_cast(rhs); \ + } + +// Comparisons between `LogSeverity` and `LogSeverityAtLeast`/ +// `LogSeverityAtMost` are only supported in one direction. +// Valid checks are: +// LogSeverity >= LogSeverityAtLeast +// LogSeverity < LogSeverityAtLeast +// LogSeverity <= LogSeverityAtMost +// LogSeverity > LogSeverityAtMost +COMPOP(>, <, LogSeverityAtLeast) +COMPOP(<=, >=, LogSeverityAtLeast) +COMPOP(<, >, LogSeverityAtMost) +COMPOP(>=, <=, LogSeverityAtMost) +#undef COMPOP + ABSL_NAMESPACE_END } // namespace absl -#endif // ABSL_BASE_INTERNAL_LOG_SEVERITY_H_ +#endif // ABSL_BASE_LOG_SEVERITY_H_ diff --git a/TMessagesProj/jni/voip/webrtc/absl/base/macros.h b/TMessagesProj/jni/voip/webrtc/absl/base/macros.h index 2c4e3570c..3e085a916 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/base/macros.h +++ b/TMessagesProj/jni/voip/webrtc/absl/base/macros.h @@ -55,115 +55,6 @@ auto ArraySizeHelper(const T (&array)[N]) -> char (&)[N]; ABSL_NAMESPACE_END } // namespace absl -// kLinkerInitialized -// -// An enum used only as a constructor argument to indicate that a variable has -// static storage duration, and that the constructor should do nothing to its -// state. Use of this macro indicates to the reader that it is legal to -// declare a static instance of the class, provided the constructor is given -// the absl::base_internal::kLinkerInitialized argument. -// -// Normally, it is unsafe to declare a static variable that has a constructor or -// a destructor because invocation order is undefined. However, if the type can -// be zero-initialized (which the loader does for static variables) into a valid -// state and the type's destructor does not affect storage, then a constructor -// for static initialization can be declared. -// -// Example: -// // Declaration -// explicit MyClass(absl::base_internal:LinkerInitialized x) {} -// -// // Invocation -// static MyClass my_global(absl::base_internal::kLinkerInitialized); -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace base_internal { -enum LinkerInitialized { - kLinkerInitialized = 0, -}; -} // namespace base_internal -ABSL_NAMESPACE_END -} // namespace absl - -// ABSL_FALLTHROUGH_INTENDED -// -// Annotates implicit fall-through between switch labels, allowing a case to -// indicate intentional fallthrough and turn off warnings about any lack of a -// `break` statement. The ABSL_FALLTHROUGH_INTENDED macro should be followed by -// a semicolon and can be used in most places where `break` can, provided that -// no statements exist between it and the next switch label. -// -// Example: -// -// switch (x) { -// case 40: -// case 41: -// if (truth_is_out_there) { -// ++x; -// ABSL_FALLTHROUGH_INTENDED; // Use instead of/along with annotations -// // in comments -// } else { -// return x; -// } -// case 42: -// ... 
-// -// Notes: when compiled with clang in C++11 mode, the ABSL_FALLTHROUGH_INTENDED -// macro is expanded to the [[clang::fallthrough]] attribute, which is analysed -// when performing switch labels fall-through diagnostic -// (`-Wimplicit-fallthrough`). See clang documentation on language extensions -// for details: -// https://clang.llvm.org/docs/AttributeReference.html#fallthrough-clang-fallthrough -// -// When used with unsupported compilers, the ABSL_FALLTHROUGH_INTENDED macro -// has no effect on diagnostics. In any case this macro has no effect on runtime -// behavior and performance of code. -#ifdef ABSL_FALLTHROUGH_INTENDED -#error "ABSL_FALLTHROUGH_INTENDED should not be defined." -#endif - -// TODO(zhangxy): Use c++17 standard [[fallthrough]] macro, when supported. -#if defined(__clang__) && defined(__has_warning) -#if __has_feature(cxx_attributes) && __has_warning("-Wimplicit-fallthrough") -#define ABSL_FALLTHROUGH_INTENDED [[clang::fallthrough]] -#endif -#elif defined(__GNUC__) && __GNUC__ >= 7 -#define ABSL_FALLTHROUGH_INTENDED [[gnu::fallthrough]] -#endif - -#ifndef ABSL_FALLTHROUGH_INTENDED -#define ABSL_FALLTHROUGH_INTENDED \ - do { \ - } while (0) -#endif - -// ABSL_DEPRECATED() -// -// Marks a deprecated class, struct, enum, function, method and variable -// declarations. The macro argument is used as a custom diagnostic message (e.g. -// suggestion of a better alternative). -// -// Examples: -// -// class ABSL_DEPRECATED("Use Bar instead") Foo {...}; -// -// ABSL_DEPRECATED("Use Baz() instead") void Bar() {...} -// -// template -// ABSL_DEPRECATED("Use DoThat() instead") -// void DoThis(); -// -// Every usage of a deprecated entity will trigger a warning when compiled with -// clang's `-Wdeprecated-declarations` option. This option is turned off by -// default, but the warnings will be reported by clang-tidy. -#if defined(__clang__) && __cplusplus >= 201103L -#define ABSL_DEPRECATED(message) __attribute__((deprecated(message))) -#endif - -#ifndef ABSL_DEPRECATED -#define ABSL_DEPRECATED(message) -#endif - // ABSL_BAD_CALL_IF() // // Used on a function overload to trap bad calls: any call that matches the @@ -253,4 +144,15 @@ ABSL_NAMESPACE_END #define ABSL_INTERNAL_RETHROW do {} while (false) #endif // ABSL_HAVE_EXCEPTIONS +// `ABSL_INTERNAL_UNREACHABLE` is an unreachable statement. A program which +// reaches one has undefined behavior, and the compiler may optimize +// accordingly. +#if defined(__GNUC__) || ABSL_HAVE_BUILTIN(__builtin_unreachable) +#define ABSL_INTERNAL_UNREACHABLE __builtin_unreachable() +#elif defined(_MSC_VER) +#define ABSL_INTERNAL_UNREACHABLE __assume(0) +#else +#define ABSL_INTERNAL_UNREACHABLE +#endif + #endif // ABSL_BASE_MACROS_H_ diff --git a/TMessagesProj/jni/voip/webrtc/absl/base/optimization.h b/TMessagesProj/jni/voip/webrtc/absl/base/optimization.h index 646523b34..d090be128 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/base/optimization.h +++ b/TMessagesProj/jni/voip/webrtc/absl/base/optimization.h @@ -22,13 +22,15 @@ #ifndef ABSL_BASE_OPTIMIZATION_H_ #define ABSL_BASE_OPTIMIZATION_H_ +#include + #include "absl/base/config.h" // ABSL_BLOCK_TAIL_CALL_OPTIMIZATION // -// Instructs the compiler to avoid optimizing tail-call recursion. Use of this -// macro is useful when you wish to preserve the existing function order within -// a stack trace for logging, debugging, or profiling purposes. +// Instructs the compiler to avoid optimizing tail-call recursion. 
This macro is +// useful when you wish to preserve the existing function order within a stack +// trace for logging, debugging, or profiling purposes. // // Example: // @@ -104,9 +106,10 @@ // Cacheline aligning objects properly allows constructive memory sharing and // prevents destructive (or "false") memory sharing. // -// NOTE: this macro should be replaced with usage of `alignas()` using +// NOTE: callers should replace uses of this macro with `alignas()` using // `std::hardware_constructive_interference_size` and/or -// `std::hardware_destructive_interference_size` when available within C++17. +// `std::hardware_destructive_interference_size` when C++17 becomes available to +// them. // // See http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0154r1.html // for more information. @@ -171,11 +174,71 @@ // to yield performance improvements. #if ABSL_HAVE_BUILTIN(__builtin_expect) || \ (defined(__GNUC__) && !defined(__clang__)) -#define ABSL_PREDICT_FALSE(x) (__builtin_expect(x, 0)) +#define ABSL_PREDICT_FALSE(x) (__builtin_expect(false || (x), false)) #define ABSL_PREDICT_TRUE(x) (__builtin_expect(false || (x), true)) #else #define ABSL_PREDICT_FALSE(x) (x) #define ABSL_PREDICT_TRUE(x) (x) #endif +// ABSL_INTERNAL_ASSUME(cond) +// Informs the compiler that a condition is always true and that it can assume +// it to be true for optimization purposes. The call has undefined behavior if +// the condition is false. +// In !NDEBUG mode, the condition is checked with an assert(). +// NOTE: The expression must not have side effects, as it will only be evaluated +// in some compilation modes and not others. +// +// Example: +// +// int x = ...; +// ABSL_INTERNAL_ASSUME(x >= 0); +// // The compiler can optimize the division to a simple right shift using the +// // assumption specified above. +// int y = x / 16; +// +#if !defined(NDEBUG) +#define ABSL_INTERNAL_ASSUME(cond) assert(cond) +#elif ABSL_HAVE_BUILTIN(__builtin_assume) +#define ABSL_INTERNAL_ASSUME(cond) __builtin_assume(cond) +#elif defined(__GNUC__) || ABSL_HAVE_BUILTIN(__builtin_unreachable) +#define ABSL_INTERNAL_ASSUME(cond) \ + do { \ + if (!(cond)) __builtin_unreachable(); \ + } while (0) +#elif defined(_MSC_VER) +#define ABSL_INTERNAL_ASSUME(cond) __assume(cond) +#else +#define ABSL_INTERNAL_ASSUME(cond) \ + do { \ + static_cast(false && (cond)); \ + } while (0) +#endif + +// ABSL_INTERNAL_UNIQUE_SMALL_NAME(cond) +// This macro forces small unique name on a static file level symbols like +// static local variables or static functions. This is intended to be used in +// macro definitions to optimize the cost of generated code. Do NOT use it on +// symbols exported from translation unit since it may cause a link time +// conflict. 
+// +// Example: +// +// #define MY_MACRO(txt) +// namespace { +// char VeryVeryLongVarName[] ABSL_INTERNAL_UNIQUE_SMALL_NAME() = txt; +// const char* VeryVeryLongFuncName() ABSL_INTERNAL_UNIQUE_SMALL_NAME(); +// const char* VeryVeryLongFuncName() { return txt; } +// } +// + +#if defined(__GNUC__) +#define ABSL_INTERNAL_UNIQUE_SMALL_NAME2(x) #x +#define ABSL_INTERNAL_UNIQUE_SMALL_NAME1(x) ABSL_INTERNAL_UNIQUE_SMALL_NAME2(x) +#define ABSL_INTERNAL_UNIQUE_SMALL_NAME() \ + asm(ABSL_INTERNAL_UNIQUE_SMALL_NAME1(.absl.__COUNTER__)) +#else +#define ABSL_INTERNAL_UNIQUE_SMALL_NAME() +#endif + #endif // ABSL_BASE_OPTIMIZATION_H_ diff --git a/TMessagesProj/jni/voip/webrtc/absl/base/options.h b/TMessagesProj/jni/voip/webrtc/absl/base/options.h index 230bf1eec..1641271cd 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/base/options.h +++ b/TMessagesProj/jni/voip/webrtc/absl/base/options.h @@ -100,7 +100,7 @@ // User code should not inspect this macro. To check in the preprocessor if // absl::any is a typedef of std::any, use the feature macro ABSL_USES_STD_ANY. -#define ABSL_OPTION_USE_STD_ANY 2 +#define ABSL_OPTION_USE_STD_ANY 0 // ABSL_OPTION_USE_STD_OPTIONAL @@ -127,7 +127,7 @@ // absl::optional is a typedef of std::optional, use the feature macro // ABSL_USES_STD_OPTIONAL. -#define ABSL_OPTION_USE_STD_OPTIONAL 2 +#define ABSL_OPTION_USE_STD_OPTIONAL 0 // ABSL_OPTION_USE_STD_STRING_VIEW @@ -154,7 +154,7 @@ // absl::string_view is a typedef of std::string_view, use the feature macro // ABSL_USES_STD_STRING_VIEW. -#define ABSL_OPTION_USE_STD_STRING_VIEW 2 +#define ABSL_OPTION_USE_STD_STRING_VIEW 0 // ABSL_OPTION_USE_STD_VARIANT // @@ -180,7 +180,7 @@ // absl::variant is a typedef of std::variant, use the feature macro // ABSL_USES_STD_VARIANT. -#define ABSL_OPTION_USE_STD_VARIANT 2 +#define ABSL_OPTION_USE_STD_VARIANT 0 // ABSL_OPTION_USE_INLINE_NAMESPACE @@ -233,6 +233,6 @@ // checks enabled by this option may abort the program in a different way and // log additional information when `NDEBUG` is not defined. -#define ABSL_OPTION_HARDENED 0 +#define ABSL_OPTION_HARDENED 1 #endif // ABSL_BASE_OPTIONS_H_ diff --git a/TMessagesProj/jni/voip/webrtc/absl/base/policy_checks.h b/TMessagesProj/jni/voip/webrtc/absl/base/policy_checks.h index 4dfa49e54..06b324391 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/base/policy_checks.h +++ b/TMessagesProj/jni/voip/webrtc/absl/base/policy_checks.h @@ -41,7 +41,7 @@ #endif // ----------------------------------------------------------------------------- -// Compiler Check +// Toolchain Check // ----------------------------------------------------------------------------- // We support MSVC++ 14.0 update 2 and later. diff --git a/TMessagesProj/jni/voip/webrtc/absl/base/port.h b/TMessagesProj/jni/voip/webrtc/absl/base/port.h index 6c28068d4..5bc4d6cd9 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/base/port.h +++ b/TMessagesProj/jni/voip/webrtc/absl/base/port.h @@ -14,7 +14,6 @@ // // This files is a forwarding header for other headers containing various // portability macros and functions. -// This file is used for both C and C++! 
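A side note on the options.h hunk above: pinning the ABSL_OPTION_USE_STD_* settings to 0 forces absl::any, absl::optional, absl::string_view, and absl::variant to Abseil's own implementations instead of auto-detecting the std:: types, and setting ABSL_OPTION_HARDENED to 1 enables the extra runtime checks. Code that needs to know which implementation it got can test the corresponding ABSL_USES_STD_* feature macro, as in this hypothetical snippet:

```
#include "absl/types/optional.h"

void Example() {
  // With ABSL_OPTION_USE_STD_OPTIONAL forced to 0, ABSL_USES_STD_OPTIONAL is
  // not defined and absl::optional is Abseil's standalone implementation.
#ifdef ABSL_USES_STD_OPTIONAL
  // absl::optional is an alias of std::optional in this configuration.
#else
  // absl::optional is Abseil's own type (the configuration chosen here).
#endif
  absl::optional<int> value;  // Usage is identical either way.
  value = 42;
  (void)value;
}
```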
#ifndef ABSL_BASE_PORT_H_ #define ABSL_BASE_PORT_H_ diff --git a/TMessagesProj/jni/voip/webrtc/absl/base/spinlock_test_common.cc b/TMessagesProj/jni/voip/webrtc/absl/base/spinlock_test_common.cc index 08f61ba86..2b572c5b3 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/base/spinlock_test_common.cc +++ b/TMessagesProj/jni/voip/webrtc/absl/base/spinlock_test_common.cc @@ -20,10 +20,12 @@ #include #include #include // NOLINT(build/c++11) +#include #include #include "gtest/gtest.h" #include "absl/base/attributes.h" +#include "absl/base/config.h" #include "absl/base/internal/low_level_scheduling.h" #include "absl/base/internal/scheduling_mode.h" #include "absl/base/internal/spinlock.h" @@ -56,12 +58,10 @@ namespace { static constexpr int kArrayLength = 10; static uint32_t values[kArrayLength]; -static SpinLock static_spinlock(base_internal::kLinkerInitialized); -static SpinLock static_cooperative_spinlock( - base_internal::kLinkerInitialized, - base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL); -static SpinLock static_noncooperative_spinlock( - base_internal::kLinkerInitialized, base_internal::SCHEDULE_KERNEL_ONLY); +ABSL_CONST_INIT static SpinLock static_cooperative_spinlock( + absl::kConstInit, base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL); +ABSL_CONST_INIT static SpinLock static_noncooperative_spinlock( + absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY); // Simple integer hash function based on the public domain lookup2 hash. // http://burtleburtle.net/bob/c/lookup2.c @@ -92,6 +92,7 @@ static void TestFunction(int thread_salt, SpinLock* spinlock) { static void ThreadedTest(SpinLock* spinlock) { std::vector threads; + threads.reserve(kNumThreads); for (int i = 0; i < kNumThreads; ++i) { threads.push_back(std::thread(TestFunction, i, spinlock)); } @@ -105,6 +106,10 @@ static void ThreadedTest(SpinLock* spinlock) { } } +#ifndef ABSL_HAVE_THREAD_SANITIZER +static_assert(std::is_trivially_destructible(), ""); +#endif + TEST(SpinLock, StackNonCooperativeDisablesScheduling) { SpinLock spinlock(base_internal::SCHEDULE_KERNEL_ONLY); spinlock.Lock(); @@ -191,10 +196,6 @@ TEST(SpinLock, WaitCyclesEncoding) { EXPECT_GT(expected_max_value_decoded, before_max_value_decoded); } -TEST(SpinLockWithThreads, StaticSpinLock) { - ThreadedTest(&static_spinlock); -} - TEST(SpinLockWithThreads, StackSpinLock) { SpinLock spinlock; ThreadedTest(&spinlock); diff --git a/TMessagesProj/jni/voip/webrtc/absl/base/thread_annotations.h b/TMessagesProj/jni/voip/webrtc/absl/base/thread_annotations.h index bcd05e51d..531e4f7a4 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/base/thread_annotations.h +++ b/TMessagesProj/jni/voip/webrtc/absl/base/thread_annotations.h @@ -34,14 +34,9 @@ #ifndef ABSL_BASE_THREAD_ANNOTATIONS_H_ #define ABSL_BASE_THREAD_ANNOTATIONS_H_ +#include "absl/base/attributes.h" #include "absl/base/config.h" -#if defined(__clang__) -#define ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(x) __attribute__((x)) -#else -#define ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(x) // no-op -#endif - // ABSL_GUARDED_BY() // // Documents if a shared field or global variable needs to be protected by a @@ -59,8 +54,11 @@ // int p1_ ABSL_GUARDED_BY(mu_); // ... 
// }; -#define ABSL_GUARDED_BY(x) \ - ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(guarded_by(x)) +#if ABSL_HAVE_ATTRIBUTE(guarded_by) +#define ABSL_GUARDED_BY(x) __attribute__((guarded_by(x))) +#else +#define ABSL_GUARDED_BY(x) +#endif // ABSL_PT_GUARDED_BY() // @@ -82,8 +80,11 @@ // // `q_`, guarded by `mu1_`, points to a shared memory location that is // // guarded by `mu2_`: // int *q_ ABSL_GUARDED_BY(mu1_) ABSL_PT_GUARDED_BY(mu2_); -#define ABSL_PT_GUARDED_BY(x) \ - ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(pt_guarded_by(x)) +#if ABSL_HAVE_ATTRIBUTE(pt_guarded_by) +#define ABSL_PT_GUARDED_BY(x) __attribute__((pt_guarded_by(x))) +#else +#define ABSL_PT_GUARDED_BY(x) +#endif // ABSL_ACQUIRED_AFTER() / ABSL_ACQUIRED_BEFORE() // @@ -100,11 +101,17 @@ // // Mutex m1_; // Mutex m2_ ABSL_ACQUIRED_AFTER(m1_); -#define ABSL_ACQUIRED_AFTER(...) \ - ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(acquired_after(__VA_ARGS__)) +#if ABSL_HAVE_ATTRIBUTE(acquired_after) +#define ABSL_ACQUIRED_AFTER(...) __attribute__((acquired_after(__VA_ARGS__))) +#else +#define ABSL_ACQUIRED_AFTER(...) +#endif -#define ABSL_ACQUIRED_BEFORE(...) \ - ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(acquired_before(__VA_ARGS__)) +#if ABSL_HAVE_ATTRIBUTE(acquired_before) +#define ABSL_ACQUIRED_BEFORE(...) __attribute__((acquired_before(__VA_ARGS__))) +#else +#define ABSL_ACQUIRED_BEFORE(...) +#endif // ABSL_EXCLUSIVE_LOCKS_REQUIRED() / ABSL_SHARED_LOCKS_REQUIRED() // @@ -129,65 +136,95 @@ // // void foo() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu1, mu2) { ... } // void bar() const ABSL_SHARED_LOCKS_REQUIRED(mu1, mu2) { ... } -#define ABSL_EXCLUSIVE_LOCKS_REQUIRED(...) \ - ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE( \ - exclusive_locks_required(__VA_ARGS__)) +#if ABSL_HAVE_ATTRIBUTE(exclusive_locks_required) +#define ABSL_EXCLUSIVE_LOCKS_REQUIRED(...) \ + __attribute__((exclusive_locks_required(__VA_ARGS__))) +#else +#define ABSL_EXCLUSIVE_LOCKS_REQUIRED(...) +#endif +#if ABSL_HAVE_ATTRIBUTE(shared_locks_required) #define ABSL_SHARED_LOCKS_REQUIRED(...) \ - ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(shared_locks_required(__VA_ARGS__)) + __attribute__((shared_locks_required(__VA_ARGS__))) +#else +#define ABSL_SHARED_LOCKS_REQUIRED(...) +#endif // ABSL_LOCKS_EXCLUDED() // // Documents the locks acquired in the body of the function. These locks // cannot be held when calling this function (as Abseil's `Mutex` locks are // non-reentrant). -#define ABSL_LOCKS_EXCLUDED(...) \ - ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(locks_excluded(__VA_ARGS__)) +#if ABSL_HAVE_ATTRIBUTE(locks_excluded) +#define ABSL_LOCKS_EXCLUDED(...) __attribute__((locks_excluded(__VA_ARGS__))) +#else +#define ABSL_LOCKS_EXCLUDED(...) +#endif // ABSL_LOCK_RETURNED() // // Documents a function that returns a mutex without acquiring it. For example, // a public getter method that returns a pointer to a private mutex should // be annotated with ABSL_LOCK_RETURNED. -#define ABSL_LOCK_RETURNED(x) \ - ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(lock_returned(x)) +#if ABSL_HAVE_ATTRIBUTE(lock_returned) +#define ABSL_LOCK_RETURNED(x) __attribute__((lock_returned(x))) +#else +#define ABSL_LOCK_RETURNED(x) +#endif // ABSL_LOCKABLE // // Documents if a class/type is a lockable type (such as the `Mutex` class). 
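Putting the ABSL_-prefixed annotations in this hunk together, a short hypothetical sketch with absl::Mutex (the Registry class and its members are illustrative, not part of the patch):

```
#include "absl/base/thread_annotations.h"
#include "absl/synchronization/mutex.h"

class Registry {
 public:
  // Callers must not already hold mu_ (absl::Mutex is non-reentrant).
  void Add(int id) ABSL_LOCKS_EXCLUDED(mu_) {
    absl::MutexLock lock(&mu_);
    AddLocked(id);
  }

 private:
  // Callers must hold mu_; the analysis checks this at every call site.
  void AddLocked(int id) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) { last_id_ = id; }

  absl::Mutex mu_;
  int last_id_ ABSL_GUARDED_BY(mu_) = 0;
};
```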
-#define ABSL_LOCKABLE ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(lockable) +#if ABSL_HAVE_ATTRIBUTE(lockable) +#define ABSL_LOCKABLE __attribute__((lockable)) +#else +#define ABSL_LOCKABLE +#endif // ABSL_SCOPED_LOCKABLE // // Documents if a class does RAII locking (such as the `MutexLock` class). // The constructor should use `LOCK_FUNCTION()` to specify the mutex that is -// acquired, and the destructor should use `ABSL_UNLOCK_FUNCTION()` with no +// acquired, and the destructor should use `UNLOCK_FUNCTION()` with no // arguments; the analysis will assume that the destructor unlocks whatever the // constructor locked. -#define ABSL_SCOPED_LOCKABLE \ - ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(scoped_lockable) +#if ABSL_HAVE_ATTRIBUTE(scoped_lockable) +#define ABSL_SCOPED_LOCKABLE __attribute__((scoped_lockable)) +#else +#define ABSL_SCOPED_LOCKABLE +#endif // ABSL_EXCLUSIVE_LOCK_FUNCTION() // // Documents functions that acquire a lock in the body of a function, and do // not release it. -#define ABSL_EXCLUSIVE_LOCK_FUNCTION(...) \ - ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE( \ - exclusive_lock_function(__VA_ARGS__)) +#if ABSL_HAVE_ATTRIBUTE(exclusive_lock_function) +#define ABSL_EXCLUSIVE_LOCK_FUNCTION(...) \ + __attribute__((exclusive_lock_function(__VA_ARGS__))) +#else +#define ABSL_EXCLUSIVE_LOCK_FUNCTION(...) +#endif // ABSL_SHARED_LOCK_FUNCTION() // // Documents functions that acquire a shared (reader) lock in the body of a // function, and do not release it. +#if ABSL_HAVE_ATTRIBUTE(shared_lock_function) #define ABSL_SHARED_LOCK_FUNCTION(...) \ - ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(shared_lock_function(__VA_ARGS__)) + __attribute__((shared_lock_function(__VA_ARGS__))) +#else +#define ABSL_SHARED_LOCK_FUNCTION(...) +#endif // ABSL_UNLOCK_FUNCTION() // // Documents functions that expect a lock to be held on entry to the function, // and release it in the body of the function. -#define ABSL_UNLOCK_FUNCTION(...) \ - ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(unlock_function(__VA_ARGS__)) +#if ABSL_HAVE_ATTRIBUTE(unlock_function) +#define ABSL_UNLOCK_FUNCTION(...) __attribute__((unlock_function(__VA_ARGS__))) +#else +#define ABSL_UNLOCK_FUNCTION(...) +#endif // ABSL_EXCLUSIVE_TRYLOCK_FUNCTION() / ABSL_SHARED_TRYLOCK_FUNCTION() // @@ -197,31 +234,49 @@ // success, or `false` for functions that return `false` on success. The second // argument specifies the mutex that is locked on success. If unspecified, this // mutex is assumed to be `this`. +#if ABSL_HAVE_ATTRIBUTE(exclusive_trylock_function) #define ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(...) \ - ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE( \ - exclusive_trylock_function(__VA_ARGS__)) + __attribute__((exclusive_trylock_function(__VA_ARGS__))) +#else +#define ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(...) +#endif -#define ABSL_SHARED_TRYLOCK_FUNCTION(...) \ - ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE( \ - shared_trylock_function(__VA_ARGS__)) +#if ABSL_HAVE_ATTRIBUTE(shared_trylock_function) +#define ABSL_SHARED_TRYLOCK_FUNCTION(...) \ + __attribute__((shared_trylock_function(__VA_ARGS__))) +#else +#define ABSL_SHARED_TRYLOCK_FUNCTION(...) +#endif // ABSL_ASSERT_EXCLUSIVE_LOCK() / ABSL_ASSERT_SHARED_LOCK() // // Documents functions that dynamically check to see if a lock is held, and fail // if it is not held. +#if ABSL_HAVE_ATTRIBUTE(assert_exclusive_lock) #define ABSL_ASSERT_EXCLUSIVE_LOCK(...) 
\ - ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(assert_exclusive_lock(__VA_ARGS__)) + __attribute__((assert_exclusive_lock(__VA_ARGS__))) +#else +#define ABSL_ASSERT_EXCLUSIVE_LOCK(...) +#endif +#if ABSL_HAVE_ATTRIBUTE(assert_shared_lock) #define ABSL_ASSERT_SHARED_LOCK(...) \ - ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(assert_shared_lock(__VA_ARGS__)) + __attribute__((assert_shared_lock(__VA_ARGS__))) +#else +#define ABSL_ASSERT_SHARED_LOCK(...) +#endif // ABSL_NO_THREAD_SAFETY_ANALYSIS // // Turns off thread safety checking within the body of a particular function. // This annotation is used to mark functions that are known to be correct, but // the locking behavior is more complicated than the analyzer can handle. +#if ABSL_HAVE_ATTRIBUTE(no_thread_safety_analysis) #define ABSL_NO_THREAD_SAFETY_ANALYSIS \ - ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(no_thread_safety_analysis) + __attribute__((no_thread_safety_analysis)) +#else +#define ABSL_NO_THREAD_SAFETY_ANALYSIS +#endif //------------------------------------------------------------------------------ // Tool-Supplied Annotations @@ -252,7 +307,7 @@ // Disables warnings for a single read operation. This can be used to avoid // warnings when it is known that the read is not actually involved in a race, // but the compiler cannot confirm that. -#define ABSL_TS_UNCHECKED_READ(x) absl::base_internal::absl_ts_unchecked_read(x) +#define ABSL_TS_UNCHECKED_READ(x) absl::base_internal::ts_unchecked_read(x) namespace absl { ABSL_NAMESPACE_BEGIN @@ -260,14 +315,14 @@ namespace base_internal { // Takes a reference to a guarded data member, and returns an unguarded // reference. -// Do not used this function directly, use ABSL_TS_UNCHECKED_READ instead. +// Do not use this function directly, use ABSL_TS_UNCHECKED_READ instead. template -inline const T& absl_ts_unchecked_read(const T& v) ABSL_NO_THREAD_SAFETY_ANALYSIS { +inline const T& ts_unchecked_read(const T& v) ABSL_NO_THREAD_SAFETY_ANALYSIS { return v; } template -inline T& absl_ts_unchecked_read(T& v) ABSL_NO_THREAD_SAFETY_ANALYSIS { +inline T& ts_unchecked_read(T& v) ABSL_NO_THREAD_SAFETY_ANALYSIS { return v; } diff --git a/TMessagesProj/jni/voip/webrtc/absl/cleanup/cleanup.h b/TMessagesProj/jni/voip/webrtc/absl/cleanup/cleanup.h new file mode 100644 index 000000000..960ccd080 --- /dev/null +++ b/TMessagesProj/jni/voip/webrtc/absl/cleanup/cleanup.h @@ -0,0 +1,140 @@ +// Copyright 2021 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: cleanup.h +// ----------------------------------------------------------------------------- +// +// `absl::Cleanup` implements the scope guard idiom, invoking the contained +// callback's `operator()() &&` on scope exit. 
+// +// Example: +// +// ``` +// absl::Status CopyGoodData(const char* source_path, const char* sink_path) { +// FILE* source_file = fopen(source_path, "r"); +// if (source_file == nullptr) { +// return absl::NotFoundError("No source file"); // No cleanups execute +// } +// +// // C++17 style cleanup using class template argument deduction +// absl::Cleanup source_closer = [source_file] { fclose(source_file); }; +// +// FILE* sink_file = fopen(sink_path, "w"); +// if (sink_file == nullptr) { +// return absl::NotFoundError("No sink file"); // First cleanup executes +// } +// +// // C++11 style cleanup using the factory function +// auto sink_closer = absl::MakeCleanup([sink_file] { fclose(sink_file); }); +// +// Data data; +// while (ReadData(source_file, &data)) { +// if (!data.IsGood()) { +// absl::Status result = absl::FailedPreconditionError("Read bad data"); +// return result; // Both cleanups execute +// } +// SaveData(sink_file, &data); +// } +// +// return absl::OkStatus(); // Both cleanups execute +// } +// ``` +// +// Methods: +// +// `std::move(cleanup).Cancel()` will prevent the callback from executing. +// +// `std::move(cleanup).Invoke()` will execute the callback early, before +// destruction, and prevent the callback from executing in the destructor. +// +// Usage: +// +// `absl::Cleanup` is not an interface type. It is only intended to be used +// within the body of a function. It is not a value type and instead models a +// control flow construct. Check out `defer` in Golang for something similar. + +#ifndef ABSL_CLEANUP_CLEANUP_H_ +#define ABSL_CLEANUP_CLEANUP_H_ + +#include + +#include "absl/base/config.h" +#include "absl/base/macros.h" +#include "absl/cleanup/internal/cleanup.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +template +class ABSL_MUST_USE_RESULT Cleanup final { + static_assert(cleanup_internal::WasDeduced(), + "Explicit template parameters are not supported."); + + static_assert(cleanup_internal::ReturnsVoid(), + "Callbacks that return values are not supported."); + + public: + Cleanup(Callback callback) : storage_(std::move(callback)) {} // NOLINT + + Cleanup(Cleanup&& other) = default; + + void Cancel() && { + ABSL_HARDENING_ASSERT(storage_.IsCallbackEngaged()); + storage_.DestroyCallback(); + } + + void Invoke() && { + ABSL_HARDENING_ASSERT(storage_.IsCallbackEngaged()); + storage_.InvokeCallback(); + storage_.DestroyCallback(); + } + + ~Cleanup() { + if (storage_.IsCallbackEngaged()) { + storage_.InvokeCallback(); + storage_.DestroyCallback(); + } + } + + private: + cleanup_internal::Storage storage_; +}; + +// `absl::Cleanup c = /* callback */;` +// +// C++17 type deduction API for creating an instance of `absl::Cleanup` +#if defined(ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION) +template +Cleanup(Callback callback) -> Cleanup; +#endif // defined(ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION) + +// `auto c = absl::MakeCleanup(/* callback */);` +// +// C++11 type deduction API for creating an instance of `absl::Cleanup` +template +absl::Cleanup MakeCleanup(Callback callback) { + static_assert(cleanup_internal::WasDeduced(), + "Explicit template parameters are not supported."); + + static_assert(cleanup_internal::ReturnsVoid(), + "Callbacks that return values are not supported."); + + return {std::move(callback)}; +} + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CLEANUP_CLEANUP_H_ diff --git a/TMessagesProj/jni/voip/webrtc/absl/cleanup/internal/cleanup.h b/TMessagesProj/jni/voip/webrtc/absl/cleanup/internal/cleanup.h new file mode 100644 
index 000000000..2783fcb7c --- /dev/null +++ b/TMessagesProj/jni/voip/webrtc/absl/cleanup/internal/cleanup.h @@ -0,0 +1,100 @@ +// Copyright 2021 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CLEANUP_INTERNAL_CLEANUP_H_ +#define ABSL_CLEANUP_INTERNAL_CLEANUP_H_ + +#include +#include +#include + +#include "absl/base/internal/invoke.h" +#include "absl/base/macros.h" +#include "absl/base/thread_annotations.h" +#include "absl/utility/utility.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +namespace cleanup_internal { + +struct Tag {}; + +template +constexpr bool WasDeduced() { + return (std::is_same::value) && + (sizeof...(Args) == 0); +} + +template +constexpr bool ReturnsVoid() { + return (std::is_same, void>::value); +} + +template +class Storage { + public: + Storage() = delete; + + explicit Storage(Callback callback) { + // Placement-new into a character buffer is used for eager destruction when + // the cleanup is invoked or cancelled. To ensure this optimizes well, the + // behavior is implemented locally instead of using an absl::optional. + ::new (GetCallbackBuffer()) Callback(std::move(callback)); + is_callback_engaged_ = true; + } + + Storage(Storage&& other) { + ABSL_HARDENING_ASSERT(other.IsCallbackEngaged()); + + ::new (GetCallbackBuffer()) Callback(std::move(other.GetCallback())); + is_callback_engaged_ = true; + + other.DestroyCallback(); + } + + Storage(const Storage& other) = delete; + + Storage& operator=(Storage&& other) = delete; + + Storage& operator=(const Storage& other) = delete; + + void* GetCallbackBuffer() { return static_cast(+callback_buffer_); } + + Callback& GetCallback() { + return *reinterpret_cast(GetCallbackBuffer()); + } + + bool IsCallbackEngaged() const { return is_callback_engaged_; } + + void DestroyCallback() { + is_callback_engaged_ = false; + GetCallback().~Callback(); + } + + void InvokeCallback() ABSL_NO_THREAD_SAFETY_ANALYSIS { + std::move(GetCallback())(); + } + + private: + bool is_callback_engaged_; + alignas(Callback) char callback_buffer_[sizeof(Callback)]; +}; + +} // namespace cleanup_internal + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CLEANUP_INTERNAL_CLEANUP_H_ diff --git a/TMessagesProj/jni/voip/webrtc/absl/container/btree_benchmark.cc b/TMessagesProj/jni/voip/webrtc/absl/container/btree_benchmark.cc index 467986768..0ca497c81 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/container/btree_benchmark.cc +++ b/TMessagesProj/jni/voip/webrtc/absl/container/btree_benchmark.cc @@ -26,6 +26,7 @@ #include #include +#include "benchmark/benchmark.h" #include "absl/base/internal/raw_logging.h" #include "absl/container/btree_map.h" #include "absl/container/btree_set.h" @@ -39,7 +40,6 @@ #include "absl/strings/cord.h" #include "absl/strings/str_format.h" #include "absl/time/time.h" -#include "benchmark/benchmark.h" namespace absl { ABSL_NAMESPACE_BEGIN @@ -101,39 +101,6 @@ void BM_InsertSorted(benchmark::State& state) { BM_InsertImpl(state, true); } -// container::insert sometimes 
returns a pair and sometimes -// returns an iterator (for multi- containers). -template -Iter GetIterFromInsert(const std::pair& pair) { - return pair.first; -} -template -Iter GetIterFromInsert(const Iter iter) { - return iter; -} - -// Benchmark insertion of values into a container at the end. -template -void BM_InsertEnd(benchmark::State& state) { - using V = typename remove_pair_const::type; - typename KeyOfValue::type key_of_value; - - T container; - const int kSize = 10000; - for (int i = 0; i < kSize; ++i) { - container.insert(Generator(kSize)(i)); - } - V v = Generator(kSize)(kSize - 1); - typename T::key_type k = key_of_value(v); - - auto it = container.find(k); - while (state.KeepRunning()) { - // Repeatedly removing then adding v. - container.erase(it); - it = GetIterFromInsert(container.insert(v)); - } -} - // Benchmark inserting the first few elements in a container. In b-tree, this is // when the root node grows. template @@ -186,9 +153,9 @@ void BM_FullLookup(benchmark::State& state) { BM_LookupImpl(state, true); } -// Benchmark deletion of values from a container. +// Benchmark erasing values from a container. template -void BM_Delete(benchmark::State& state) { +void BM_Erase(benchmark::State& state) { using V = typename remove_pair_const::type; typename KeyOfValue::type key_of_value; std::vector values = GenerateValues(kBenchmarkValues); @@ -213,9 +180,9 @@ void BM_Delete(benchmark::State& state) { } } -// Benchmark deletion of multiple values from a container. +// Benchmark erasing multiple values from a container. template -void BM_DeleteRange(benchmark::State& state) { +void BM_EraseRange(benchmark::State& state) { using V = typename remove_pair_const::type; typename KeyOfValue::type key_of_value; std::vector values = GenerateValues(kBenchmarkValues); @@ -255,6 +222,40 @@ void BM_DeleteRange(benchmark::State& state) { } } +// Predicate that erases every other element. We can't use a lambda because +// C++11 doesn't support generic lambdas. +// TODO(b/207389011): consider adding benchmarks that remove different fractions +// of keys (e.g. 10%, 90%). +struct EraseIfPred { + uint64_t i = 0; + template + bool operator()(const T&) { + return ++i % 2; + } +}; + +// Benchmark erasing multiple values from a container with a predicate. +template +void BM_EraseIf(benchmark::State& state) { + using V = typename remove_pair_const::type; + std::vector values = GenerateValues(kBenchmarkValues); + + // Removes half of the keys per batch. + const int batch_size = (kBenchmarkValues + 1) / 2; + EraseIfPred pred; + while (state.KeepRunningBatch(batch_size)) { + state.PauseTiming(); + { + T container(values.begin(), values.end()); + state.ResumeTiming(); + erase_if(container, pred); + benchmark::DoNotOptimize(container); + state.PauseTiming(); + } + state.ResumeTiming(); + } +} + // Benchmark steady-state insert (into first half of range) and remove (from // second half of range), treating the container approximately like a queue with // log-time access for all elements. 
This benchmark does not test the case where @@ -510,15 +511,14 @@ BTREE_TYPES(Time); void BM_##type##_##func(benchmark::State& state) { BM_##func(state); } \ BENCHMARK(BM_##type##_##func) -#define MY_BENCHMARK3(type) \ +#define MY_BENCHMARK3_STL(type) \ MY_BENCHMARK4(type, Insert); \ MY_BENCHMARK4(type, InsertSorted); \ - MY_BENCHMARK4(type, InsertEnd); \ MY_BENCHMARK4(type, InsertSmall); \ MY_BENCHMARK4(type, Lookup); \ MY_BENCHMARK4(type, FullLookup); \ - MY_BENCHMARK4(type, Delete); \ - MY_BENCHMARK4(type, DeleteRange); \ + MY_BENCHMARK4(type, Erase); \ + MY_BENCHMARK4(type, EraseRange); \ MY_BENCHMARK4(type, QueueAddRem); \ MY_BENCHMARK4(type, MixedAddRem); \ MY_BENCHMARK4(type, Fifo); \ @@ -526,9 +526,13 @@ BTREE_TYPES(Time); MY_BENCHMARK4(type, InsertRangeRandom); \ MY_BENCHMARK4(type, InsertRangeSorted) +#define MY_BENCHMARK3(type) \ + MY_BENCHMARK4(type, EraseIf); \ + MY_BENCHMARK3_STL(type) + #define MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(type) \ - MY_BENCHMARK3(stl_##type); \ - MY_BENCHMARK3(stl_unordered_##type); \ + MY_BENCHMARK3_STL(stl_##type); \ + MY_BENCHMARK3_STL(stl_unordered_##type); \ MY_BENCHMARK3(btree_256_##type) #define MY_BENCHMARK2(type) \ @@ -718,12 +722,12 @@ double ContainerInfo(const btree_map>& b) { btree_set>; \ using btree_256_map_size##SIZE##copies##SIZE##ptr = \ btree_map>; \ - MY_BENCHMARK3(stl_set_size##SIZE##copies##SIZE##ptr); \ - MY_BENCHMARK3(stl_unordered_set_size##SIZE##copies##SIZE##ptr); \ + MY_BENCHMARK3_STL(stl_set_size##SIZE##copies##SIZE##ptr); \ + MY_BENCHMARK3_STL(stl_unordered_set_size##SIZE##copies##SIZE##ptr); \ MY_BENCHMARK3(flat_hash_set_size##SIZE##copies##SIZE##ptr); \ MY_BENCHMARK3(btree_256_set_size##SIZE##copies##SIZE##ptr); \ - MY_BENCHMARK3(stl_map_size##SIZE##copies##SIZE##ptr); \ - MY_BENCHMARK3(stl_unordered_map_size##SIZE##copies##SIZE##ptr); \ + MY_BENCHMARK3_STL(stl_map_size##SIZE##copies##SIZE##ptr); \ + MY_BENCHMARK3_STL(stl_unordered_map_size##SIZE##copies##SIZE##ptr); \ MY_BENCHMARK3(flat_hash_map_size##SIZE##copies##SIZE##ptr); \ MY_BENCHMARK3(btree_256_map_size##SIZE##copies##SIZE##ptr) diff --git a/TMessagesProj/jni/voip/webrtc/absl/container/btree_map.h b/TMessagesProj/jni/voip/webrtc/absl/container/btree_map.h index bb450eadd..ad484ce02 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/container/btree_map.h +++ b/TMessagesProj/jni/voip/webrtc/absl/container/btree_map.h @@ -35,14 +35,17 @@ // // However, these types should not be considered drop-in replacements for // `std::map` and `std::multimap` as there are some API differences, which are -// noted in this header file. +// noted in this header file. The most consequential differences with respect to +// migrating to b-tree from the STL types are listed in the next paragraph. +// Other API differences are minor. // // Importantly, insertions and deletions may invalidate outstanding iterators, // pointers, and references to elements. Such invalidations are typically only // an issue if insertion and deletion operations are interleaved with the use of // more than one iterator, pointer, or reference simultaneously. For this // reason, `insert()` and `erase()` return a valid iterator at the current -// position. +// position. Another important difference is that key-types must be +// copy-constructible. 
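The invalidation caveat above is the reason `erase()` returns an iterator positioned at the next element. A minimal sketch of the resulting erase-while-iterating pattern follows; the `DropNegative` helper is made up for illustration.

```
#include <string>

#include "absl/container/btree_map.h"

// Removes every entry with a negative value. The iterator returned by
// erase() already points at the following element, so the loop needs no
// extra bookkeeping even though mutation may invalidate iterators.
void DropNegative(absl::btree_map<std::string, int>& m) {
  for (auto it = m.begin(); it != m.end();) {
    if (it->second < 0) {
      it = m.erase(it);
    } else {
      ++it;
    }
  }
}
```

For whole-container filtering, the `absl::erase_if` overloads defined further down are the more direct tool.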
#ifndef ABSL_CONTAINER_BTREE_MAP_H_ #define ABSL_CONTAINER_BTREE_MAP_H_ @@ -53,6 +56,14 @@ namespace absl { ABSL_NAMESPACE_BEGIN +namespace container_internal { + +template +struct map_params; + +} // namespace container_internal + // absl::btree_map<> // // An `absl::btree_map` is an ordered associative container of @@ -185,7 +196,7 @@ class btree_map // template size_type erase(const K& key): // // Erases the element with the matching key, if it exists, returning the - // number of elements erased. + // number of elements erased (0 or 1). using Base::erase; // btree_map::insert() @@ -325,6 +336,11 @@ class btree_map // does not contain an element with a matching key, this function returns an // empty node handle. // + // NOTE: when compiled in an earlier version of C++ than C++17, + // `node_type::key()` returns a const reference to the key instead of a + // mutable reference. We cannot safely return a mutable reference without + // std::launder (which is not available before C++17). + // // NOTE: In this context, `node_type` refers to the C++17 concept of a // move-only type that owns and provides access to the elements in associative // containers (https://en.cppreference.com/w/cpp/container/node_handle). @@ -361,8 +377,8 @@ class btree_map // Determines whether an element comparing equal to the given `key` exists // within the `btree_map`, returning `true` if so or `false` otherwise. // - // Supports heterogeneous lookup, provided that the map is provided a - // compatible heterogeneous comparator. + // Supports heterogeneous lookup, provided that the map has a compatible + // heterogeneous comparator. using Base::contains; // btree_map::count() @@ -373,15 +389,14 @@ class btree_map // the `btree_map`. Note that this function will return either `1` or `0` // since duplicate elements are not allowed within a `btree_map`. // - // Supports heterogeneous lookup, provided that the map is provided a - // compatible heterogeneous comparator. + // Supports heterogeneous lookup, provided that the map has a compatible + // heterogeneous comparator. using Base::count; // btree_map::equal_range() // - // Returns a closed range [first, last], defined by a `std::pair` of two - // iterators, containing all elements with the passed key in the - // `btree_map`. + // Returns a half-open range [first, last), defined by a `std::pair` of two + // iterators, containing all elements with the passed key in the `btree_map`. using Base::equal_range; // btree_map::find() @@ -391,10 +406,34 @@ class btree_map // // Finds an element with the passed `key` within the `btree_map`. // - // Supports heterogeneous lookup, provided that the map is provided a - // compatible heterogeneous comparator. + // Supports heterogeneous lookup, provided that the map has a compatible + // heterogeneous comparator. using Base::find; + // btree_map::lower_bound() + // + // template iterator lower_bound(const K& key): + // template const_iterator lower_bound(const K& key) const: + // + // Finds the first element with a key that is not less than `key` within the + // `btree_map`. + // + // Supports heterogeneous lookup, provided that the map has a compatible + // heterogeneous comparator. + using Base::lower_bound; + + // btree_map::upper_bound() + // + // template iterator upper_bound(const K& key): + // template const_iterator upper_bound(const K& key) const: + // + // Finds the first element with a key that is greater than `key` within the + // `btree_map`. 
+ // + // Supports heterogeneous lookup, provided that the map has a compatible + // heterogeneous comparator. + using Base::upper_bound; + // btree_map::operator[]() // // Returns a reference to the value mapped to the passed key within the @@ -439,15 +478,11 @@ void swap(btree_map &x, btree_map &y) { // absl::erase_if(absl::btree_map<>, Pred) // // Erases all elements that satisfy the predicate pred from the container. +// Returns the number of erased elements. template -void erase_if(btree_map &map, Pred pred) { - for (auto it = map.begin(); it != map.end();) { - if (pred(*it)) { - it = map.erase(it); - } else { - ++it; - } - } +typename btree_map::size_type erase_if( + btree_map &map, Pred pred) { + return container_internal::btree_access::erase_if(map, std::move(pred)); } // absl::btree_multimap @@ -652,6 +687,11 @@ class btree_multimap // does not contain an element with a matching key, this function returns an // empty node handle. // + // NOTE: when compiled in an earlier version of C++ than C++17, + // `node_type::key()` returns a const reference to the key instead of a + // mutable reference. We cannot safely return a mutable reference without + // std::launder (which is not available before C++17). + // // NOTE: In this context, `node_type` refers to the C++17 concept of a // move-only type that owns and provides access to the elements in associative // containers (https://en.cppreference.com/w/cpp/container/node_handle). @@ -660,9 +700,8 @@ class btree_multimap // btree_multimap::merge() // - // Extracts elements from a given `source` btree_multimap into this - // `btree_multimap`. If the destination `btree_multimap` already contains an - // element with an equivalent key, that element is not extracted. + // Extracts all elements from a given `source` btree_multimap into this + // `btree_multimap`. using Base::merge; // btree_multimap::swap(btree_multimap& other) @@ -682,8 +721,8 @@ class btree_multimap // Determines whether an element comparing equal to the given `key` exists // within the `btree_multimap`, returning `true` if so or `false` otherwise. // - // Supports heterogeneous lookup, provided that the map is provided a - // compatible heterogeneous comparator. + // Supports heterogeneous lookup, provided that the map has a compatible + // heterogeneous comparator. using Base::contains; // btree_multimap::count() @@ -693,13 +732,13 @@ class btree_multimap // Returns the number of elements comparing equal to the given `key` within // the `btree_multimap`. // - // Supports heterogeneous lookup, provided that the map is provided a - // compatible heterogeneous comparator. + // Supports heterogeneous lookup, provided that the map has a compatible + // heterogeneous comparator. using Base::count; // btree_multimap::equal_range() // - // Returns a closed range [first, last], defined by a `std::pair` of two + // Returns a half-open range [first, last), defined by a `std::pair` of two // iterators, containing all elements with the passed key in the // `btree_multimap`. using Base::equal_range; @@ -711,10 +750,34 @@ class btree_multimap // // Finds an element with the passed `key` within the `btree_multimap`. // - // Supports heterogeneous lookup, provided that the map is provided a - // compatible heterogeneous comparator. + // Supports heterogeneous lookup, provided that the map has a compatible + // heterogeneous comparator. 
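As a usage note for the `absl::erase_if` overload documented above, which now reports how many elements it removed, here is a hedged sketch; `DropShortKeys` is a made-up helper.

```
#include <cstddef>
#include <string>
#include <utility>

#include "absl/container/btree_map.h"

// Erases all entries whose key is shorter than three characters and returns
// the number of erased elements, as documented for absl::erase_if above.
std::size_t DropShortKeys(absl::btree_map<std::string, int>& m) {
  return absl::erase_if(m, [](const std::pair<const std::string, int>& kv) {
    return kv.first.size() < 3;
  });
}
```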
using Base::find; + // btree_multimap::lower_bound() + // + // template iterator lower_bound(const K& key): + // template const_iterator lower_bound(const K& key) const: + // + // Finds the first element with a key that is not less than `key` within the + // `btree_multimap`. + // + // Supports heterogeneous lookup, provided that the map has a compatible + // heterogeneous comparator. + using Base::lower_bound; + + // btree_multimap::upper_bound() + // + // template iterator upper_bound(const K& key): + // template const_iterator upper_bound(const K& key) const: + // + // Finds the first element with a key that is greater than `key` within the + // `btree_multimap`. + // + // Supports heterogeneous lookup, provided that the map has a compatible + // heterogeneous comparator. + using Base::upper_bound; + // btree_multimap::get_allocator() // // Returns the allocator function associated with this `btree_multimap`. @@ -742,17 +805,65 @@ void swap(btree_multimap &x, btree_multimap &y) { // absl::erase_if(absl::btree_multimap<>, Pred) // // Erases all elements that satisfy the predicate pred from the container. +// Returns the number of erased elements. template -void erase_if(btree_multimap &map, Pred pred) { - for (auto it = map.begin(); it != map.end();) { - if (pred(*it)) { - it = map.erase(it); - } else { - ++it; - } - } +typename btree_multimap::size_type erase_if( + btree_multimap &map, Pred pred) { + return container_internal::btree_access::erase_if(map, std::move(pred)); } +namespace container_internal { + +// A parameters structure for holding the type parameters for a btree_map. +// Compare and Alloc should be nothrow copy-constructible. +template +struct map_params : common_params> { + using super_type = typename map_params::common_params; + using mapped_type = Data; + // This type allows us to move keys when it is safe to do so. It is safe + // for maps in which value_type and mutable_value_type are layout compatible. + using slot_policy = typename super_type::slot_policy; + using slot_type = typename super_type::slot_type; + using value_type = typename super_type::value_type; + using init_type = typename super_type::init_type; + + using original_key_compare = typename super_type::original_key_compare; + // Reference: https://en.cppreference.com/w/cpp/container/map/value_compare + class value_compare { + template + friend class btree; + + protected: + explicit value_compare(original_key_compare c) : comp(std::move(c)) {} + + original_key_compare comp; // NOLINT + + public: + auto operator()(const value_type &lhs, const value_type &rhs) const + -> decltype(comp(lhs.first, rhs.first)) { + return comp(lhs.first, rhs.first); + } + }; + using is_map_container = std::true_type; + + template + static auto key(const V &value) -> decltype(value.first) { + return value.first; + } + static const Key &key(const slot_type *s) { return slot_policy::key(s); } + static const Key &key(slot_type *s) { return slot_policy::key(s); } + // For use in node handle. 
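Returning to the user-facing `btree_multimap` API documented above: `equal_range()` yields the half-open range [first, last), so visiting every value stored under one key is a plain loop. A minimal sketch, with `ValuesFor` as a made-up helper:

```
#include <string>
#include <vector>

#include "absl/container/btree_map.h"

// Collects every value stored under `key`; the loop stops at the exclusive
// end of the half-open range returned by equal_range().
std::vector<int> ValuesFor(const absl::btree_multimap<std::string, int>& m,
                           const std::string& key) {
  std::vector<int> out;
  const auto range = m.equal_range(key);
  for (auto it = range.first; it != range.second; ++it) {
    out.push_back(it->second);
  }
  return out;
}
```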
+ static auto mutable_key(slot_type *s) + -> decltype(slot_policy::mutable_key(s)) { + return slot_policy::mutable_key(s); + } + static mapped_type &value(value_type *value) { return value->second; } +}; + +} // namespace container_internal + ABSL_NAMESPACE_END } // namespace absl diff --git a/TMessagesProj/jni/voip/webrtc/absl/container/btree_set.h b/TMessagesProj/jni/voip/webrtc/absl/container/btree_set.h index d3e78866a..78826830f 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/container/btree_set.h +++ b/TMessagesProj/jni/voip/webrtc/absl/container/btree_set.h @@ -35,7 +35,9 @@ // // However, these types should not be considered drop-in replacements for // `std::set` and `std::multiset` as there are some API differences, which are -// noted in this header file. +// noted in this header file. The most consequential differences with respect to +// migrating to b-tree from the STL types are listed in the next paragraph. +// Other API differences are minor. // // Importantly, insertions and deletions may invalidate outstanding iterators, // pointers, and references to elements. Such invalidations are typically only @@ -53,6 +55,17 @@ namespace absl { ABSL_NAMESPACE_BEGIN +namespace container_internal { + +template +struct set_slot_policy; + +template +struct set_params; + +} // namespace container_internal + // absl::btree_set<> // // An `absl::btree_set` is an ordered associative container of unique key @@ -183,7 +196,7 @@ class btree_set // template size_type erase(const K& key): // // Erases the element with the matching key, if it exists, returning the - // number of elements erased. + // number of elements erased (0 or 1). using Base::erase; // btree_set::insert() @@ -300,8 +313,8 @@ class btree_set // Determines whether an element comparing equal to the given `key` exists // within the `btree_set`, returning `true` if so or `false` otherwise. // - // Supports heterogeneous lookup, provided that the set is provided a - // compatible heterogeneous comparator. + // Supports heterogeneous lookup, provided that the set has a compatible + // heterogeneous comparator. using Base::contains; // btree_set::count() @@ -312,8 +325,8 @@ class btree_set // the `btree_set`. Note that this function will return either `1` or `0` // since duplicate elements are not allowed within a `btree_set`. // - // Supports heterogeneous lookup, provided that the set is provided a - // compatible heterogeneous comparator. + // Supports heterogeneous lookup, provided that the set has a compatible + // heterogeneous comparator. using Base::count; // btree_set::equal_range() @@ -330,10 +343,32 @@ class btree_set // // Finds an element with the passed `key` within the `btree_set`. // - // Supports heterogeneous lookup, provided that the set is provided a - // compatible heterogeneous comparator. + // Supports heterogeneous lookup, provided that the set has a compatible + // heterogeneous comparator. using Base::find; + // btree_set::lower_bound() + // + // template iterator lower_bound(const K& key): + // template const_iterator lower_bound(const K& key) const: + // + // Finds the first element that is not less than `key` within the `btree_set`. + // + // Supports heterogeneous lookup, provided that the set has a compatible + // heterogeneous comparator. + using Base::lower_bound; + + // btree_set::upper_bound() + // + // template iterator upper_bound(const K& key): + // template const_iterator upper_bound(const K& key) const: + // + // Finds the first element that is greater than `key` within the `btree_set`. 
+ // + // Supports heterogeneous lookup, provided that the set has a compatible + // heterogeneous comparator. + using Base::upper_bound; + // btree_set::get_allocator() // // Returns the allocator function associated with this `btree_set`. @@ -363,15 +398,11 @@ void swap(btree_set &x, btree_set &y) { // absl::erase_if(absl::btree_set<>, Pred) // // Erases all elements that satisfy the predicate pred from the container. +// Returns the number of erased elements. template -void erase_if(btree_set &set, Pred pred) { - for (auto it = set.begin(); it != set.end();) { - if (pred(*it)) { - it = set.erase(it); - } else { - ++it; - } - } +typename btree_set::size_type erase_if(btree_set &set, + Pred pred) { + return container_internal::btree_access::erase_if(set, std::move(pred)); } // absl::btree_multiset<> @@ -582,9 +613,8 @@ class btree_multiset // btree_multiset::merge() // - // Extracts elements from a given `source` btree_multiset into this - // `btree_multiset`. If the destination `btree_multiset` already contains an - // element with an equivalent key, that element is not extracted. + // Extracts all elements from a given `source` btree_multiset into this + // `btree_multiset`. using Base::merge; // btree_multiset::swap(btree_multiset& other) @@ -604,8 +634,8 @@ class btree_multiset // Determines whether an element comparing equal to the given `key` exists // within the `btree_multiset`, returning `true` if so or `false` otherwise. // - // Supports heterogeneous lookup, provided that the set is provided a - // compatible heterogeneous comparator. + // Supports heterogeneous lookup, provided that the set has a compatible + // heterogeneous comparator. using Base::contains; // btree_multiset::count() @@ -615,8 +645,8 @@ class btree_multiset // Returns the number of elements comparing equal to the given `key` within // the `btree_multiset`. // - // Supports heterogeneous lookup, provided that the set is provided a - // compatible heterogeneous comparator. + // Supports heterogeneous lookup, provided that the set has a compatible + // heterogeneous comparator. using Base::count; // btree_multiset::equal_range() @@ -633,10 +663,34 @@ class btree_multiset // // Finds an element with the passed `key` within the `btree_multiset`. // - // Supports heterogeneous lookup, provided that the set is provided a - // compatible heterogeneous comparator. + // Supports heterogeneous lookup, provided that the set has a compatible + // heterogeneous comparator. using Base::find; + // btree_multiset::lower_bound() + // + // template iterator lower_bound(const K& key): + // template const_iterator lower_bound(const K& key) const: + // + // Finds the first element that is not less than `key` within the + // `btree_multiset`. + // + // Supports heterogeneous lookup, provided that the set has a compatible + // heterogeneous comparator. + using Base::lower_bound; + + // btree_multiset::upper_bound() + // + // template iterator upper_bound(const K& key): + // template const_iterator upper_bound(const K& key) const: + // + // Finds the first element that is greater than `key` within the + // `btree_multiset`. + // + // Supports heterogeneous lookup, provided that the set has a compatible + // heterogeneous comparator. + using Base::upper_bound; + // btree_multiset::get_allocator() // // Returns the allocator function associated with this `btree_multiset`. 
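The `lower_bound()`/`upper_bound()` members documented above make half-open key-range scans straightforward on the ordered containers. A small sketch using a hypothetical `KeysInRange` helper on `btree_set`:

```
#include <vector>

#include "absl/container/btree_set.h"

// Returns all keys in the half-open interval [lo, hi). Keys greater than or
// equal to `hi` are excluded, which is why the scan ends at lower_bound(hi)
// rather than upper_bound(hi).
std::vector<int> KeysInRange(const absl::btree_set<int>& s, int lo, int hi) {
  std::vector<int> out;
  const auto end = s.lower_bound(hi);
  for (auto it = s.lower_bound(lo); it != end; ++it) {
    out.push_back(*it);
  }
  return out;
}
```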
@@ -666,17 +720,76 @@ void swap(btree_multiset &x, btree_multiset &y) { // absl::erase_if(absl::btree_multiset<>, Pred) // // Erases all elements that satisfy the predicate pred from the container. +// Returns the number of erased elements. template -void erase_if(btree_multiset &set, Pred pred) { - for (auto it = set.begin(); it != set.end();) { - if (pred(*it)) { - it = set.erase(it); - } else { - ++it; - } - } +typename btree_multiset::size_type erase_if( + btree_multiset & set, Pred pred) { + return container_internal::btree_access::erase_if(set, std::move(pred)); } +namespace container_internal { + +// This type implements the necessary functions from the +// absl::container_internal::slot_type interface for btree_(multi)set. +template +struct set_slot_policy { + using slot_type = Key; + using value_type = Key; + using mutable_value_type = Key; + + static value_type &element(slot_type *slot) { return *slot; } + static const value_type &element(const slot_type *slot) { return *slot; } + + template + static void construct(Alloc *alloc, slot_type *slot, Args &&...args) { + absl::allocator_traits::construct(*alloc, slot, + std::forward(args)...); + } + + template + static void construct(Alloc *alloc, slot_type *slot, slot_type *other) { + absl::allocator_traits::construct(*alloc, slot, std::move(*other)); + } + + template + static void destroy(Alloc *alloc, slot_type *slot) { + absl::allocator_traits::destroy(*alloc, slot); + } + + template + static void swap(Alloc * /*alloc*/, slot_type *a, slot_type *b) { + using std::swap; + swap(*a, *b); + } + + template + static void move(Alloc * /*alloc*/, slot_type *src, slot_type *dest) { + *dest = std::move(*src); + } +}; + +// A parameters structure for holding the type parameters for a btree_set. +// Compare and Alloc should be nothrow copy-constructible. +template +struct set_params : common_params> { + using value_type = Key; + using slot_type = typename set_params::common_params::slot_type; + using value_compare = + typename set_params::common_params::original_key_compare; + using is_map_container = std::false_type; + + template + static const V &key(const V &value) { + return value; + } + static const Key &key(const slot_type *slot) { return *slot; } + static const Key &key(slot_type *slot) { return *slot; } +}; + +} // namespace container_internal + ABSL_NAMESPACE_END } // namespace absl diff --git a/TMessagesProj/jni/voip/webrtc/absl/container/btree_test.cc b/TMessagesProj/jni/voip/webrtc/absl/container/btree_test.cc new file mode 100644 index 000000000..e829e0bab --- /dev/null +++ b/TMessagesProj/jni/voip/webrtc/absl/container/btree_test.cc @@ -0,0 +1,3071 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
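The `set_slot_policy` added above routes element construction and destruction through `std::allocator_traits`, so allocators with custom `construct()`/`destroy()` hooks keep working even though elements live in raw node storage. Below is a simplified, standalone sketch of that idiom; it is not Abseil's actual slot machinery, only an illustration of the pattern.

```
#include <memory>
#include <utility>

// A single raw slot that holds at most one T. Construction and destruction
// go through allocator_traits, mirroring the slot-policy approach above.
template <typename T, typename Alloc = std::allocator<T>>
struct SingleSlot {
  alignas(T) unsigned char buf[sizeof(T)];

  T* slot() { return reinterpret_cast<T*>(buf); }

  template <typename... Args>
  void Construct(Alloc& alloc, Args&&... args) {
    std::allocator_traits<Alloc>::construct(alloc, slot(),
                                            std::forward<Args>(args)...);
  }

  void Destroy(Alloc& alloc) {
    std::allocator_traits<Alloc>::destroy(alloc, slot());
  }
};
```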
+ +#include "absl/container/btree_test.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/base/internal/raw_logging.h" +#include "absl/base/macros.h" +#include "absl/container/btree_map.h" +#include "absl/container/btree_set.h" +#include "absl/container/internal/counting_allocator.h" +#include "absl/container/internal/test_instance_tracker.h" +#include "absl/flags/flag.h" +#include "absl/hash/hash_testing.h" +#include "absl/memory/memory.h" +#include "absl/meta/type_traits.h" +#include "absl/strings/str_cat.h" +#include "absl/strings/str_split.h" +#include "absl/strings/string_view.h" +#include "absl/types/compare.h" + +ABSL_FLAG(int, test_values, 10000, "The number of values to use for tests"); + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +namespace { + +using ::absl::test_internal::CopyableMovableInstance; +using ::absl::test_internal::InstanceTracker; +using ::absl::test_internal::MovableOnlyInstance; +using ::testing::ElementsAre; +using ::testing::ElementsAreArray; +using ::testing::IsEmpty; +using ::testing::IsNull; +using ::testing::Pair; +using ::testing::SizeIs; + +template +void CheckPairEquals(const T &x, const U &y) { + ABSL_INTERNAL_CHECK(x == y, "Values are unequal."); +} + +template +void CheckPairEquals(const std::pair &x, const std::pair &y) { + CheckPairEquals(x.first, y.first); + CheckPairEquals(x.second, y.second); +} +} // namespace + +// The base class for a sorted associative container checker. TreeType is the +// container type to check and CheckerType is the container type to check +// against. TreeType is expected to be btree_{set,map,multiset,multimap} and +// CheckerType is expected to be {set,map,multiset,multimap}. 
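The checker classes that follow mirror every b-tree operation into the corresponding standard container and compare the observable state. In miniature, the same differential-testing idea looks like this (an illustrative sketch, not the `base_checker` defined below):

```
#include <algorithm>
#include <random>
#include <set>

#include "gtest/gtest.h"
#include "absl/container/btree_set.h"

// Applies a random sequence of insert/erase operations to a btree_set and a
// std::set and checks that both containers agree after every step.
TEST(BtreeDifferentialSketch, MatchesStdSet) {
  absl::btree_set<int> tree;
  std::set<int> reference;
  std::mt19937 rng(42);
  for (int step = 0; step < 1000; ++step) {
    const int key = static_cast<int>(rng() % 100);
    if (rng() % 2 == 0) {
      EXPECT_EQ(tree.insert(key).second, reference.insert(key).second);
    } else {
      EXPECT_EQ(tree.erase(key), reference.erase(key));
    }
    ASSERT_EQ(tree.size(), reference.size());
    EXPECT_TRUE(std::equal(tree.begin(), tree.end(), reference.begin()));
  }
}
```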
+template +class base_checker { + public: + using key_type = typename TreeType::key_type; + using value_type = typename TreeType::value_type; + using key_compare = typename TreeType::key_compare; + using pointer = typename TreeType::pointer; + using const_pointer = typename TreeType::const_pointer; + using reference = typename TreeType::reference; + using const_reference = typename TreeType::const_reference; + using size_type = typename TreeType::size_type; + using difference_type = typename TreeType::difference_type; + using iterator = typename TreeType::iterator; + using const_iterator = typename TreeType::const_iterator; + using reverse_iterator = typename TreeType::reverse_iterator; + using const_reverse_iterator = typename TreeType::const_reverse_iterator; + + public: + base_checker() : const_tree_(tree_) {} + base_checker(const base_checker &other) + : tree_(other.tree_), const_tree_(tree_), checker_(other.checker_) {} + template + base_checker(InputIterator b, InputIterator e) + : tree_(b, e), const_tree_(tree_), checker_(b, e) {} + + iterator begin() { return tree_.begin(); } + const_iterator begin() const { return tree_.begin(); } + iterator end() { return tree_.end(); } + const_iterator end() const { return tree_.end(); } + reverse_iterator rbegin() { return tree_.rbegin(); } + const_reverse_iterator rbegin() const { return tree_.rbegin(); } + reverse_iterator rend() { return tree_.rend(); } + const_reverse_iterator rend() const { return tree_.rend(); } + + template + IterType iter_check(IterType tree_iter, CheckerIterType checker_iter) const { + if (tree_iter == tree_.end()) { + ABSL_INTERNAL_CHECK(checker_iter == checker_.end(), + "Checker iterator not at end."); + } else { + CheckPairEquals(*tree_iter, *checker_iter); + } + return tree_iter; + } + template + IterType riter_check(IterType tree_iter, CheckerIterType checker_iter) const { + if (tree_iter == tree_.rend()) { + ABSL_INTERNAL_CHECK(checker_iter == checker_.rend(), + "Checker iterator not at rend."); + } else { + CheckPairEquals(*tree_iter, *checker_iter); + } + return tree_iter; + } + void value_check(const value_type &v) { + typename KeyOfValue::type key_of_value; + const key_type &key = key_of_value(v); + CheckPairEquals(*find(key), v); + lower_bound(key); + upper_bound(key); + equal_range(key); + contains(key); + count(key); + } + void erase_check(const key_type &key) { + EXPECT_FALSE(tree_.contains(key)); + EXPECT_EQ(tree_.find(key), const_tree_.end()); + EXPECT_FALSE(const_tree_.contains(key)); + EXPECT_EQ(const_tree_.find(key), tree_.end()); + EXPECT_EQ(tree_.equal_range(key).first, + const_tree_.equal_range(key).second); + } + + iterator lower_bound(const key_type &key) { + return iter_check(tree_.lower_bound(key), checker_.lower_bound(key)); + } + const_iterator lower_bound(const key_type &key) const { + return iter_check(tree_.lower_bound(key), checker_.lower_bound(key)); + } + iterator upper_bound(const key_type &key) { + return iter_check(tree_.upper_bound(key), checker_.upper_bound(key)); + } + const_iterator upper_bound(const key_type &key) const { + return iter_check(tree_.upper_bound(key), checker_.upper_bound(key)); + } + std::pair equal_range(const key_type &key) { + std::pair + checker_res = checker_.equal_range(key); + std::pair tree_res = tree_.equal_range(key); + iter_check(tree_res.first, checker_res.first); + iter_check(tree_res.second, checker_res.second); + return tree_res; + } + std::pair equal_range( + const key_type &key) const { + std::pair + checker_res = checker_.equal_range(key); + 
std::pair tree_res = tree_.equal_range(key); + iter_check(tree_res.first, checker_res.first); + iter_check(tree_res.second, checker_res.second); + return tree_res; + } + iterator find(const key_type &key) { + return iter_check(tree_.find(key), checker_.find(key)); + } + const_iterator find(const key_type &key) const { + return iter_check(tree_.find(key), checker_.find(key)); + } + bool contains(const key_type &key) const { return find(key) != end(); } + size_type count(const key_type &key) const { + size_type res = checker_.count(key); + EXPECT_EQ(res, tree_.count(key)); + return res; + } + + base_checker &operator=(const base_checker &other) { + tree_ = other.tree_; + checker_ = other.checker_; + return *this; + } + + int erase(const key_type &key) { + int size = tree_.size(); + int res = checker_.erase(key); + EXPECT_EQ(res, tree_.count(key)); + EXPECT_EQ(res, tree_.erase(key)); + EXPECT_EQ(tree_.count(key), 0); + EXPECT_EQ(tree_.size(), size - res); + erase_check(key); + return res; + } + iterator erase(iterator iter) { + key_type key = iter.key(); + int size = tree_.size(); + int count = tree_.count(key); + auto checker_iter = checker_.lower_bound(key); + for (iterator tmp(tree_.lower_bound(key)); tmp != iter; ++tmp) { + ++checker_iter; + } + auto checker_next = checker_iter; + ++checker_next; + checker_.erase(checker_iter); + iter = tree_.erase(iter); + EXPECT_EQ(tree_.size(), checker_.size()); + EXPECT_EQ(tree_.size(), size - 1); + EXPECT_EQ(tree_.count(key), count - 1); + if (count == 1) { + erase_check(key); + } + return iter_check(iter, checker_next); + } + + void erase(iterator begin, iterator end) { + int size = tree_.size(); + int count = std::distance(begin, end); + auto checker_begin = checker_.lower_bound(begin.key()); + for (iterator tmp(tree_.lower_bound(begin.key())); tmp != begin; ++tmp) { + ++checker_begin; + } + auto checker_end = + end == tree_.end() ? checker_.end() : checker_.lower_bound(end.key()); + if (end != tree_.end()) { + for (iterator tmp(tree_.lower_bound(end.key())); tmp != end; ++tmp) { + ++checker_end; + } + } + const auto checker_ret = checker_.erase(checker_begin, checker_end); + const auto tree_ret = tree_.erase(begin, end); + EXPECT_EQ(std::distance(checker_.begin(), checker_ret), + std::distance(tree_.begin(), tree_ret)); + EXPECT_EQ(tree_.size(), checker_.size()); + EXPECT_EQ(tree_.size(), size - count); + } + + void clear() { + tree_.clear(); + checker_.clear(); + } + void swap(base_checker &other) { + tree_.swap(other.tree_); + checker_.swap(other.checker_); + } + + void verify() const { + tree_.verify(); + EXPECT_EQ(tree_.size(), checker_.size()); + + // Move through the forward iterators using increment. + auto checker_iter = checker_.begin(); + const_iterator tree_iter(tree_.begin()); + for (; tree_iter != tree_.end(); ++tree_iter, ++checker_iter) { + CheckPairEquals(*tree_iter, *checker_iter); + } + + // Move through the forward iterators using decrement. + for (int n = tree_.size() - 1; n >= 0; --n) { + iter_check(tree_iter, checker_iter); + --tree_iter; + --checker_iter; + } + EXPECT_EQ(tree_iter, tree_.begin()); + EXPECT_EQ(checker_iter, checker_.begin()); + + // Move through the reverse iterators using increment. + auto checker_riter = checker_.rbegin(); + const_reverse_iterator tree_riter(tree_.rbegin()); + for (; tree_riter != tree_.rend(); ++tree_riter, ++checker_riter) { + CheckPairEquals(*tree_riter, *checker_riter); + } + + // Move through the reverse iterators using decrement. 
+ for (int n = tree_.size() - 1; n >= 0; --n) { + riter_check(tree_riter, checker_riter); + --tree_riter; + --checker_riter; + } + EXPECT_EQ(tree_riter, tree_.rbegin()); + EXPECT_EQ(checker_riter, checker_.rbegin()); + } + + const TreeType &tree() const { return tree_; } + + size_type size() const { + EXPECT_EQ(tree_.size(), checker_.size()); + return tree_.size(); + } + size_type max_size() const { return tree_.max_size(); } + bool empty() const { + EXPECT_EQ(tree_.empty(), checker_.empty()); + return tree_.empty(); + } + + protected: + TreeType tree_; + const TreeType &const_tree_; + CheckerType checker_; +}; + +namespace { +// A checker for unique sorted associative containers. TreeType is expected to +// be btree_{set,map} and CheckerType is expected to be {set,map}. +template +class unique_checker : public base_checker { + using super_type = base_checker; + + public: + using iterator = typename super_type::iterator; + using value_type = typename super_type::value_type; + + public: + unique_checker() : super_type() {} + unique_checker(const unique_checker &other) : super_type(other) {} + template + unique_checker(InputIterator b, InputIterator e) : super_type(b, e) {} + unique_checker &operator=(const unique_checker &) = default; + + // Insertion routines. + std::pair insert(const value_type &v) { + int size = this->tree_.size(); + std::pair checker_res = + this->checker_.insert(v); + std::pair tree_res = this->tree_.insert(v); + CheckPairEquals(*tree_res.first, *checker_res.first); + EXPECT_EQ(tree_res.second, checker_res.second); + EXPECT_EQ(this->tree_.size(), this->checker_.size()); + EXPECT_EQ(this->tree_.size(), size + tree_res.second); + return tree_res; + } + iterator insert(iterator position, const value_type &v) { + int size = this->tree_.size(); + std::pair checker_res = + this->checker_.insert(v); + iterator tree_res = this->tree_.insert(position, v); + CheckPairEquals(*tree_res, *checker_res.first); + EXPECT_EQ(this->tree_.size(), this->checker_.size()); + EXPECT_EQ(this->tree_.size(), size + checker_res.second); + return tree_res; + } + template + void insert(InputIterator b, InputIterator e) { + for (; b != e; ++b) { + insert(*b); + } + } +}; + +// A checker for multiple sorted associative containers. TreeType is expected +// to be btree_{multiset,multimap} and CheckerType is expected to be +// {multiset,multimap}. +template +class multi_checker : public base_checker { + using super_type = base_checker; + + public: + using iterator = typename super_type::iterator; + using value_type = typename super_type::value_type; + + public: + multi_checker() : super_type() {} + multi_checker(const multi_checker &other) : super_type(other) {} + template + multi_checker(InputIterator b, InputIterator e) : super_type(b, e) {} + multi_checker &operator=(const multi_checker &) = default; + + // Insertion routines. 
+ iterator insert(const value_type &v) { + int size = this->tree_.size(); + auto checker_res = this->checker_.insert(v); + iterator tree_res = this->tree_.insert(v); + CheckPairEquals(*tree_res, *checker_res); + EXPECT_EQ(this->tree_.size(), this->checker_.size()); + EXPECT_EQ(this->tree_.size(), size + 1); + return tree_res; + } + iterator insert(iterator position, const value_type &v) { + int size = this->tree_.size(); + auto checker_res = this->checker_.insert(v); + iterator tree_res = this->tree_.insert(position, v); + CheckPairEquals(*tree_res, *checker_res); + EXPECT_EQ(this->tree_.size(), this->checker_.size()); + EXPECT_EQ(this->tree_.size(), size + 1); + return tree_res; + } + template + void insert(InputIterator b, InputIterator e) { + for (; b != e; ++b) { + insert(*b); + } + } +}; + +template +void DoTest(const char *name, T *b, const std::vector &values) { + typename KeyOfValue::type key_of_value; + + T &mutable_b = *b; + const T &const_b = *b; + + // Test insert. + for (int i = 0; i < values.size(); ++i) { + mutable_b.insert(values[i]); + mutable_b.value_check(values[i]); + } + ASSERT_EQ(mutable_b.size(), values.size()); + + const_b.verify(); + + // Test copy constructor. + T b_copy(const_b); + EXPECT_EQ(b_copy.size(), const_b.size()); + for (int i = 0; i < values.size(); ++i) { + CheckPairEquals(*b_copy.find(key_of_value(values[i])), values[i]); + } + + // Test range constructor. + T b_range(const_b.begin(), const_b.end()); + EXPECT_EQ(b_range.size(), const_b.size()); + for (int i = 0; i < values.size(); ++i) { + CheckPairEquals(*b_range.find(key_of_value(values[i])), values[i]); + } + + // Test range insertion for values that already exist. + b_range.insert(b_copy.begin(), b_copy.end()); + b_range.verify(); + + // Test range insertion for new values. + b_range.clear(); + b_range.insert(b_copy.begin(), b_copy.end()); + EXPECT_EQ(b_range.size(), b_copy.size()); + for (int i = 0; i < values.size(); ++i) { + CheckPairEquals(*b_range.find(key_of_value(values[i])), values[i]); + } + + // Test assignment to self. Nothing should change. + b_range.operator=(b_range); + EXPECT_EQ(b_range.size(), b_copy.size()); + + // Test assignment of new values. + b_range.clear(); + b_range = b_copy; + EXPECT_EQ(b_range.size(), b_copy.size()); + + // Test swap. + b_range.clear(); + b_range.swap(b_copy); + EXPECT_EQ(b_copy.size(), 0); + EXPECT_EQ(b_range.size(), const_b.size()); + for (int i = 0; i < values.size(); ++i) { + CheckPairEquals(*b_range.find(key_of_value(values[i])), values[i]); + } + b_range.swap(b_copy); + + // Test non-member function swap. + swap(b_range, b_copy); + EXPECT_EQ(b_copy.size(), 0); + EXPECT_EQ(b_range.size(), const_b.size()); + for (int i = 0; i < values.size(); ++i) { + CheckPairEquals(*b_range.find(key_of_value(values[i])), values[i]); + } + swap(b_range, b_copy); + + // Test erase via values. + for (int i = 0; i < values.size(); ++i) { + mutable_b.erase(key_of_value(values[i])); + // Erasing a non-existent key should have no effect. + ASSERT_EQ(mutable_b.erase(key_of_value(values[i])), 0); + } + + const_b.verify(); + EXPECT_EQ(const_b.size(), 0); + + // Test erase via iterators. + mutable_b = b_copy; + for (int i = 0; i < values.size(); ++i) { + mutable_b.erase(mutable_b.find(key_of_value(values[i]))); + } + + const_b.verify(); + EXPECT_EQ(const_b.size(), 0); + + // Test insert with hint. + for (int i = 0; i < values.size(); i++) { + mutable_b.insert(mutable_b.upper_bound(key_of_value(values[i])), values[i]); + } + + const_b.verify(); + + // Test range erase. 
+ mutable_b.erase(mutable_b.begin(), mutable_b.end()); + EXPECT_EQ(mutable_b.size(), 0); + const_b.verify(); + + // First half. + mutable_b = b_copy; + typename T::iterator mutable_iter_end = mutable_b.begin(); + for (int i = 0; i < values.size() / 2; ++i) ++mutable_iter_end; + mutable_b.erase(mutable_b.begin(), mutable_iter_end); + EXPECT_EQ(mutable_b.size(), values.size() - values.size() / 2); + const_b.verify(); + + // Second half. + mutable_b = b_copy; + typename T::iterator mutable_iter_begin = mutable_b.begin(); + for (int i = 0; i < values.size() / 2; ++i) ++mutable_iter_begin; + mutable_b.erase(mutable_iter_begin, mutable_b.end()); + EXPECT_EQ(mutable_b.size(), values.size() / 2); + const_b.verify(); + + // Second quarter. + mutable_b = b_copy; + mutable_iter_begin = mutable_b.begin(); + for (int i = 0; i < values.size() / 4; ++i) ++mutable_iter_begin; + mutable_iter_end = mutable_iter_begin; + for (int i = 0; i < values.size() / 4; ++i) ++mutable_iter_end; + mutable_b.erase(mutable_iter_begin, mutable_iter_end); + EXPECT_EQ(mutable_b.size(), values.size() - values.size() / 4); + const_b.verify(); + + mutable_b.clear(); +} + +template +void ConstTest() { + using value_type = typename T::value_type; + typename KeyOfValue::type key_of_value; + + T mutable_b; + const T &const_b = mutable_b; + + // Insert a single value into the container and test looking it up. + value_type value = Generator(2)(2); + mutable_b.insert(value); + EXPECT_TRUE(mutable_b.contains(key_of_value(value))); + EXPECT_NE(mutable_b.find(key_of_value(value)), const_b.end()); + EXPECT_TRUE(const_b.contains(key_of_value(value))); + EXPECT_NE(const_b.find(key_of_value(value)), mutable_b.end()); + EXPECT_EQ(*const_b.lower_bound(key_of_value(value)), value); + EXPECT_EQ(const_b.upper_bound(key_of_value(value)), const_b.end()); + EXPECT_EQ(*const_b.equal_range(key_of_value(value)).first, value); + + // We can only create a non-const iterator from a non-const container. + typename T::iterator mutable_iter(mutable_b.begin()); + EXPECT_EQ(mutable_iter, const_b.begin()); + EXPECT_NE(mutable_iter, const_b.end()); + EXPECT_EQ(const_b.begin(), mutable_iter); + EXPECT_NE(const_b.end(), mutable_iter); + typename T::reverse_iterator mutable_riter(mutable_b.rbegin()); + EXPECT_EQ(mutable_riter, const_b.rbegin()); + EXPECT_NE(mutable_riter, const_b.rend()); + EXPECT_EQ(const_b.rbegin(), mutable_riter); + EXPECT_NE(const_b.rend(), mutable_riter); + + // We can create a const iterator from a non-const iterator. + typename T::const_iterator const_iter(mutable_iter); + EXPECT_EQ(const_iter, mutable_b.begin()); + EXPECT_NE(const_iter, mutable_b.end()); + EXPECT_EQ(mutable_b.begin(), const_iter); + EXPECT_NE(mutable_b.end(), const_iter); + typename T::const_reverse_iterator const_riter(mutable_riter); + EXPECT_EQ(const_riter, mutable_b.rbegin()); + EXPECT_NE(const_riter, mutable_b.rend()); + EXPECT_EQ(mutable_b.rbegin(), const_riter); + EXPECT_NE(mutable_b.rend(), const_riter); + + // Make sure various methods can be invoked on a const container. 
+ const_b.verify(); + ASSERT_TRUE(!const_b.empty()); + EXPECT_EQ(const_b.size(), 1); + EXPECT_GT(const_b.max_size(), 0); + EXPECT_TRUE(const_b.contains(key_of_value(value))); + EXPECT_EQ(const_b.count(key_of_value(value)), 1); +} + +template +void BtreeTest() { + ConstTest(); + + using V = typename remove_pair_const::type; + const std::vector random_values = GenerateValuesWithSeed( + absl::GetFlag(FLAGS_test_values), 4 * absl::GetFlag(FLAGS_test_values), + GTEST_FLAG_GET(random_seed)); + + unique_checker container; + + // Test key insertion/deletion in sorted order. + std::vector sorted_values(random_values); + std::sort(sorted_values.begin(), sorted_values.end()); + DoTest("sorted: ", &container, sorted_values); + + // Test key insertion/deletion in reverse sorted order. + std::reverse(sorted_values.begin(), sorted_values.end()); + DoTest("rsorted: ", &container, sorted_values); + + // Test key insertion/deletion in random order. + DoTest("random: ", &container, random_values); +} + +template +void BtreeMultiTest() { + ConstTest(); + + using V = typename remove_pair_const::type; + const std::vector random_values = GenerateValuesWithSeed( + absl::GetFlag(FLAGS_test_values), 4 * absl::GetFlag(FLAGS_test_values), + GTEST_FLAG_GET(random_seed)); + + multi_checker container; + + // Test keys in sorted order. + std::vector sorted_values(random_values); + std::sort(sorted_values.begin(), sorted_values.end()); + DoTest("sorted: ", &container, sorted_values); + + // Test keys in reverse sorted order. + std::reverse(sorted_values.begin(), sorted_values.end()); + DoTest("rsorted: ", &container, sorted_values); + + // Test keys in random order. + DoTest("random: ", &container, random_values); + + // Test keys in random order w/ duplicates. + std::vector duplicate_values(random_values); + duplicate_values.insert(duplicate_values.end(), random_values.begin(), + random_values.end()); + DoTest("duplicates:", &container, duplicate_values); + + // Test all identical keys. + std::vector identical_values(100); + std::fill(identical_values.begin(), identical_values.end(), + Generator(2)(2)); + DoTest("identical: ", &container, identical_values); +} + +template +struct PropagatingCountingAlloc : public CountingAllocator { + using propagate_on_container_copy_assignment = std::true_type; + using propagate_on_container_move_assignment = std::true_type; + using propagate_on_container_swap = std::true_type; + + using Base = CountingAllocator; + using Base::Base; + + template + explicit PropagatingCountingAlloc(const PropagatingCountingAlloc &other) + : Base(other.bytes_used_) {} + + template + struct rebind { + using other = PropagatingCountingAlloc; + }; +}; + +template +void BtreeAllocatorTest() { + using value_type = typename T::value_type; + + int64_t bytes1 = 0, bytes2 = 0; + PropagatingCountingAlloc allocator1(&bytes1); + PropagatingCountingAlloc allocator2(&bytes2); + Generator generator(1000); + + // Test that we allocate properly aligned memory. If we don't, then Layout + // will assert fail. + auto unused1 = allocator1.allocate(1); + auto unused2 = allocator2.allocate(1); + + // Test copy assignment + { + T b1(typename T::key_compare(), allocator1); + T b2(typename T::key_compare(), allocator2); + + int64_t original_bytes1 = bytes1; + b1.insert(generator(0)); + EXPECT_GT(bytes1, original_bytes1); + + // This should propagate the allocator. 
+ b1 = b2; + EXPECT_EQ(b1.size(), 0); + EXPECT_EQ(b2.size(), 0); + EXPECT_EQ(bytes1, original_bytes1); + + for (int i = 1; i < 1000; i++) { + b1.insert(generator(i)); + } + + // We should have allocated out of allocator2. + EXPECT_GT(bytes2, bytes1); + } + + // Test move assignment + { + T b1(typename T::key_compare(), allocator1); + T b2(typename T::key_compare(), allocator2); + + int64_t original_bytes1 = bytes1; + b1.insert(generator(0)); + EXPECT_GT(bytes1, original_bytes1); + + // This should propagate the allocator. + b1 = std::move(b2); + EXPECT_EQ(b1.size(), 0); + EXPECT_EQ(bytes1, original_bytes1); + + for (int i = 1; i < 1000; i++) { + b1.insert(generator(i)); + } + + // We should have allocated out of allocator2. + EXPECT_GT(bytes2, bytes1); + } + + // Test swap + { + T b1(typename T::key_compare(), allocator1); + T b2(typename T::key_compare(), allocator2); + + int64_t original_bytes1 = bytes1; + b1.insert(generator(0)); + EXPECT_GT(bytes1, original_bytes1); + + // This should swap the allocators. + swap(b1, b2); + EXPECT_EQ(b1.size(), 0); + EXPECT_EQ(b2.size(), 1); + EXPECT_GT(bytes1, original_bytes1); + + for (int i = 1; i < 1000; i++) { + b1.insert(generator(i)); + } + + // We should have allocated out of allocator2. + EXPECT_GT(bytes2, bytes1); + } + + allocator1.deallocate(unused1, 1); + allocator2.deallocate(unused2, 1); +} + +template +void BtreeMapTest() { + using value_type = typename T::value_type; + using mapped_type = typename T::mapped_type; + + mapped_type m = Generator(0)(0); + (void)m; + + T b; + + // Verify we can insert using operator[]. + for (int i = 0; i < 1000; i++) { + value_type v = Generator(1000)(i); + b[v.first] = v.second; + } + EXPECT_EQ(b.size(), 1000); + + // Test whether we can use the "->" operator on iterators and + // reverse_iterators. This stresses the btree_map_params::pair_pointer + // mechanism. 
+ EXPECT_EQ(b.begin()->first, Generator(1000)(0).first); + EXPECT_EQ(b.begin()->second, Generator(1000)(0).second); + EXPECT_EQ(b.rbegin()->first, Generator(1000)(999).first); + EXPECT_EQ(b.rbegin()->second, Generator(1000)(999).second); +} + +template +void BtreeMultiMapTest() { + using mapped_type = typename T::mapped_type; + mapped_type m = Generator(0)(0); + (void)m; +} + +template +void SetTest() { + EXPECT_EQ( + sizeof(absl::btree_set), + 2 * sizeof(void *) + sizeof(typename absl::btree_set::size_type)); + using BtreeSet = absl::btree_set; + using CountingBtreeSet = + absl::btree_set, PropagatingCountingAlloc>; + BtreeTest>(); + BtreeAllocatorTest(); +} + +template +void MapTest() { + EXPECT_EQ( + sizeof(absl::btree_map), + 2 * sizeof(void *) + sizeof(typename absl::btree_map::size_type)); + using BtreeMap = absl::btree_map; + using CountingBtreeMap = + absl::btree_map, + PropagatingCountingAlloc>>; + BtreeTest>(); + BtreeAllocatorTest(); + BtreeMapTest(); +} + +TEST(Btree, set_int32) { SetTest(); } +TEST(Btree, set_int64) { SetTest(); } +TEST(Btree, set_string) { SetTest(); } +TEST(Btree, set_cord) { SetTest(); } +TEST(Btree, set_pair) { SetTest>(); } +TEST(Btree, map_int32) { MapTest(); } +TEST(Btree, map_int64) { MapTest(); } +TEST(Btree, map_string) { MapTest(); } +TEST(Btree, map_cord) { MapTest(); } +TEST(Btree, map_pair) { MapTest>(); } + +template +void MultiSetTest() { + EXPECT_EQ( + sizeof(absl::btree_multiset), + 2 * sizeof(void *) + sizeof(typename absl::btree_multiset::size_type)); + using BtreeMSet = absl::btree_multiset; + using CountingBtreeMSet = + absl::btree_multiset, PropagatingCountingAlloc>; + BtreeMultiTest>(); + BtreeAllocatorTest(); +} + +template +void MultiMapTest() { + EXPECT_EQ(sizeof(absl::btree_multimap), + 2 * sizeof(void *) + + sizeof(typename absl::btree_multimap::size_type)); + using BtreeMMap = absl::btree_multimap; + using CountingBtreeMMap = + absl::btree_multimap, + PropagatingCountingAlloc>>; + BtreeMultiTest>(); + BtreeMultiMapTest(); + BtreeAllocatorTest(); +} + +TEST(Btree, multiset_int32) { MultiSetTest(); } +TEST(Btree, multiset_int64) { MultiSetTest(); } +TEST(Btree, multiset_string) { MultiSetTest(); } +TEST(Btree, multiset_cord) { MultiSetTest(); } +TEST(Btree, multiset_pair) { MultiSetTest>(); } +TEST(Btree, multimap_int32) { MultiMapTest(); } +TEST(Btree, multimap_int64) { MultiMapTest(); } +TEST(Btree, multimap_string) { MultiMapTest(); } +TEST(Btree, multimap_cord) { MultiMapTest(); } +TEST(Btree, multimap_pair) { MultiMapTest>(); } + +struct CompareIntToString { + bool operator()(const std::string &a, const std::string &b) const { + return a < b; + } + bool operator()(const std::string &a, int b) const { + return a < absl::StrCat(b); + } + bool operator()(int a, const std::string &b) const { + return absl::StrCat(a) < b; + } + using is_transparent = void; +}; + +struct NonTransparentCompare { + template + bool operator()(const T &t, const U &u) const { + // Treating all comparators as transparent can cause inefficiencies (see + // N3657 C++ proposal). Test that for comparators without 'is_transparent' + // alias (like this one), we do not attempt heterogeneous lookup. + EXPECT_TRUE((std::is_same())); + return t < u; + } +}; + +template +bool CanEraseWithEmptyBrace(T t, decltype(t.erase({})) *) { + return true; +} + +template +bool CanEraseWithEmptyBrace(T, ...) 
{ + return false; +} + +template +void TestHeterogeneous(T table) { + auto lb = table.lower_bound("3"); + EXPECT_EQ(lb, table.lower_bound(3)); + EXPECT_NE(lb, table.lower_bound(4)); + EXPECT_EQ(lb, table.lower_bound({"3"})); + EXPECT_NE(lb, table.lower_bound({})); + + auto ub = table.upper_bound("3"); + EXPECT_EQ(ub, table.upper_bound(3)); + EXPECT_NE(ub, table.upper_bound(5)); + EXPECT_EQ(ub, table.upper_bound({"3"})); + EXPECT_NE(ub, table.upper_bound({})); + + auto er = table.equal_range("3"); + EXPECT_EQ(er, table.equal_range(3)); + EXPECT_NE(er, table.equal_range(4)); + EXPECT_EQ(er, table.equal_range({"3"})); + EXPECT_NE(er, table.equal_range({})); + + auto it = table.find("3"); + EXPECT_EQ(it, table.find(3)); + EXPECT_NE(it, table.find(4)); + EXPECT_EQ(it, table.find({"3"})); + EXPECT_NE(it, table.find({})); + + EXPECT_TRUE(table.contains(3)); + EXPECT_FALSE(table.contains(4)); + EXPECT_TRUE(table.count({"3"})); + EXPECT_FALSE(table.contains({})); + + EXPECT_EQ(1, table.count(3)); + EXPECT_EQ(0, table.count(4)); + EXPECT_EQ(1, table.count({"3"})); + EXPECT_EQ(0, table.count({})); + + auto copy = table; + copy.erase(3); + EXPECT_EQ(table.size() - 1, copy.size()); + copy.erase(4); + EXPECT_EQ(table.size() - 1, copy.size()); + copy.erase({"5"}); + EXPECT_EQ(table.size() - 2, copy.size()); + EXPECT_FALSE(CanEraseWithEmptyBrace(table, nullptr)); + + // Also run it with const T&. + if (std::is_class()) TestHeterogeneous(table); +} + +TEST(Btree, HeterogeneousLookup) { + TestHeterogeneous(btree_set{"1", "3", "5"}); + TestHeterogeneous(btree_map{ + {"1", 1}, {"3", 3}, {"5", 5}}); + TestHeterogeneous( + btree_multiset{"1", "3", "5"}); + TestHeterogeneous(btree_multimap{ + {"1", 1}, {"3", 3}, {"5", 5}}); + + // Only maps have .at() + btree_map map{ + {"", -1}, {"1", 1}, {"3", 3}, {"5", 5}}; + EXPECT_EQ(1, map.at(1)); + EXPECT_EQ(3, map.at({"3"})); + EXPECT_EQ(-1, map.at({})); + const auto &cmap = map; + EXPECT_EQ(1, cmap.at(1)); + EXPECT_EQ(3, cmap.at({"3"})); + EXPECT_EQ(-1, cmap.at({})); +} + +TEST(Btree, NoHeterogeneousLookupWithoutAlias) { + using StringSet = absl::btree_set; + StringSet s; + ASSERT_TRUE(s.insert("hello").second); + ASSERT_TRUE(s.insert("world").second); + EXPECT_TRUE(s.end() == s.find("blah")); + EXPECT_TRUE(s.begin() == s.lower_bound("hello")); + EXPECT_EQ(1, s.count("world")); + EXPECT_TRUE(s.contains("hello")); + EXPECT_TRUE(s.contains("world")); + EXPECT_FALSE(s.contains("blah")); + + using StringMultiSet = + absl::btree_multiset; + StringMultiSet ms; + ms.insert("hello"); + ms.insert("world"); + ms.insert("world"); + EXPECT_TRUE(ms.end() == ms.find("blah")); + EXPECT_TRUE(ms.begin() == ms.lower_bound("hello")); + EXPECT_EQ(2, ms.count("world")); + EXPECT_TRUE(ms.contains("hello")); + EXPECT_TRUE(ms.contains("world")); + EXPECT_FALSE(ms.contains("blah")); +} + +TEST(Btree, DefaultTransparent) { + { + // `int` does not have a default transparent comparator. + // The input value is converted to key_type. + btree_set s = {1}; + double d = 1.1; + EXPECT_EQ(s.begin(), s.find(d)); + EXPECT_TRUE(s.contains(d)); + } + + { + // `std::string` has heterogeneous support. 
+ btree_set s = {"A"}; + EXPECT_EQ(s.begin(), s.find(absl::string_view("A"))); + EXPECT_TRUE(s.contains(absl::string_view("A"))); + } +} + +class StringLike { + public: + StringLike() = default; + + StringLike(const char *s) : s_(s) { // NOLINT + ++constructor_calls_; + } + + bool operator<(const StringLike &a) const { return s_ < a.s_; } + + static void clear_constructor_call_count() { constructor_calls_ = 0; } + + static int constructor_calls() { return constructor_calls_; } + + private: + static int constructor_calls_; + std::string s_; +}; + +int StringLike::constructor_calls_ = 0; + +TEST(Btree, HeterogeneousLookupDoesntDegradePerformance) { + using StringSet = absl::btree_set; + StringSet s; + for (int i = 0; i < 100; ++i) { + ASSERT_TRUE(s.insert(absl::StrCat(i).c_str()).second); + } + StringLike::clear_constructor_call_count(); + s.find("50"); + ASSERT_EQ(1, StringLike::constructor_calls()); + + StringLike::clear_constructor_call_count(); + s.contains("50"); + ASSERT_EQ(1, StringLike::constructor_calls()); + + StringLike::clear_constructor_call_count(); + s.count("50"); + ASSERT_EQ(1, StringLike::constructor_calls()); + + StringLike::clear_constructor_call_count(); + s.lower_bound("50"); + ASSERT_EQ(1, StringLike::constructor_calls()); + + StringLike::clear_constructor_call_count(); + s.upper_bound("50"); + ASSERT_EQ(1, StringLike::constructor_calls()); + + StringLike::clear_constructor_call_count(); + s.equal_range("50"); + ASSERT_EQ(1, StringLike::constructor_calls()); + + StringLike::clear_constructor_call_count(); + s.erase("50"); + ASSERT_EQ(1, StringLike::constructor_calls()); +} + +// Verify that swapping btrees swaps the key comparison functors and that we can +// use non-default constructible comparators. +struct SubstringLess { + SubstringLess() = delete; + explicit SubstringLess(int length) : n(length) {} + bool operator()(const std::string &a, const std::string &b) const { + return absl::string_view(a).substr(0, n) < + absl::string_view(b).substr(0, n); + } + int n; +}; + +TEST(Btree, SwapKeyCompare) { + using SubstringSet = absl::btree_set; + SubstringSet s1(SubstringLess(1), SubstringSet::allocator_type()); + SubstringSet s2(SubstringLess(2), SubstringSet::allocator_type()); + + ASSERT_TRUE(s1.insert("a").second); + ASSERT_FALSE(s1.insert("aa").second); + + ASSERT_TRUE(s2.insert("a").second); + ASSERT_TRUE(s2.insert("aa").second); + ASSERT_FALSE(s2.insert("aaa").second); + + swap(s1, s2); + + ASSERT_TRUE(s1.insert("b").second); + ASSERT_TRUE(s1.insert("bb").second); + ASSERT_FALSE(s1.insert("bbb").second); + + ASSERT_TRUE(s2.insert("b").second); + ASSERT_FALSE(s2.insert("bb").second); +} + +TEST(Btree, UpperBoundRegression) { + // Regress a bug where upper_bound would default-construct a new key_compare + // instead of copying the existing one. + using SubstringSet = absl::btree_set; + SubstringSet my_set(SubstringLess(3)); + my_set.insert("aab"); + my_set.insert("abb"); + // We call upper_bound("aaa"). If this correctly uses the length 3 + // comparator, aaa < aab < abb, so we should get aab as the result. + // If it instead uses the default-constructed length 2 comparator, + // aa == aa < ab, so we'll get abb as our result. 
+ SubstringSet::iterator it = my_set.upper_bound("aaa"); + ASSERT_TRUE(it != my_set.end()); + EXPECT_EQ("aab", *it); +} + +TEST(Btree, Comparison) { + const int kSetSize = 1201; + absl::btree_set my_set; + for (int i = 0; i < kSetSize; ++i) { + my_set.insert(i); + } + absl::btree_set my_set_copy(my_set); + EXPECT_TRUE(my_set_copy == my_set); + EXPECT_TRUE(my_set == my_set_copy); + EXPECT_FALSE(my_set_copy != my_set); + EXPECT_FALSE(my_set != my_set_copy); + + my_set.insert(kSetSize); + EXPECT_FALSE(my_set_copy == my_set); + EXPECT_FALSE(my_set == my_set_copy); + EXPECT_TRUE(my_set_copy != my_set); + EXPECT_TRUE(my_set != my_set_copy); + + my_set.erase(kSetSize - 1); + EXPECT_FALSE(my_set_copy == my_set); + EXPECT_FALSE(my_set == my_set_copy); + EXPECT_TRUE(my_set_copy != my_set); + EXPECT_TRUE(my_set != my_set_copy); + + absl::btree_map my_map; + for (int i = 0; i < kSetSize; ++i) { + my_map[std::string(i, 'a')] = i; + } + absl::btree_map my_map_copy(my_map); + EXPECT_TRUE(my_map_copy == my_map); + EXPECT_TRUE(my_map == my_map_copy); + EXPECT_FALSE(my_map_copy != my_map); + EXPECT_FALSE(my_map != my_map_copy); + + ++my_map_copy[std::string(7, 'a')]; + EXPECT_FALSE(my_map_copy == my_map); + EXPECT_FALSE(my_map == my_map_copy); + EXPECT_TRUE(my_map_copy != my_map); + EXPECT_TRUE(my_map != my_map_copy); + + my_map_copy = my_map; + my_map["hello"] = kSetSize; + EXPECT_FALSE(my_map_copy == my_map); + EXPECT_FALSE(my_map == my_map_copy); + EXPECT_TRUE(my_map_copy != my_map); + EXPECT_TRUE(my_map != my_map_copy); + + my_map.erase(std::string(kSetSize - 1, 'a')); + EXPECT_FALSE(my_map_copy == my_map); + EXPECT_FALSE(my_map == my_map_copy); + EXPECT_TRUE(my_map_copy != my_map); + EXPECT_TRUE(my_map != my_map_copy); +} + +TEST(Btree, RangeCtorSanity) { + std::vector ivec; + ivec.push_back(1); + std::map imap; + imap.insert(std::make_pair(1, 2)); + absl::btree_multiset tmset(ivec.begin(), ivec.end()); + absl::btree_multimap tmmap(imap.begin(), imap.end()); + absl::btree_set tset(ivec.begin(), ivec.end()); + absl::btree_map tmap(imap.begin(), imap.end()); + EXPECT_EQ(1, tmset.size()); + EXPECT_EQ(1, tmmap.size()); + EXPECT_EQ(1, tset.size()); + EXPECT_EQ(1, tmap.size()); +} + +} // namespace + +class BtreeNodePeer { + public: + // Yields the size of a leaf node with a specific number of values. + template + constexpr static size_t GetTargetNodeSize(size_t target_values_per_node) { + return btree_node< + set_params, std::allocator, + /*TargetNodeSize=*/256, // This parameter isn't used here. + /*Multi=*/false>>::SizeWithNSlots(target_values_per_node); + } + + // Yields the number of slots in a (non-root) leaf node for this btree. 
+ template + constexpr static size_t GetNumSlotsPerNode() { + return btree_node::kNodeSlots; + } + + template + constexpr static size_t GetMaxFieldType() { + return std::numeric_limits< + typename btree_node::field_type>::max(); + } + + template + constexpr static bool UsesLinearNodeSearch() { + return btree_node::use_linear_search::value; + } + + template + constexpr static bool UsesGenerations() { + return Btree::params_type::kEnableGenerations; + } +}; + +namespace { + +class BtreeMapTest : public ::testing::Test { + public: + struct Key {}; + struct Cmp { + template + bool operator()(T, T) const { + return false; + } + }; + + struct KeyLin { + using absl_btree_prefer_linear_node_search = std::true_type; + }; + struct CmpLin : Cmp { + using absl_btree_prefer_linear_node_search = std::true_type; + }; + + struct KeyBin { + using absl_btree_prefer_linear_node_search = std::false_type; + }; + struct CmpBin : Cmp { + using absl_btree_prefer_linear_node_search = std::false_type; + }; + + template + static bool IsLinear() { + return BtreeNodePeer::UsesLinearNodeSearch>(); + } +}; + +TEST_F(BtreeMapTest, TestLinearSearchPreferredForKeyLinearViaAlias) { + // Test requesting linear search by directly exporting an alias. + EXPECT_FALSE((IsLinear())); + EXPECT_TRUE((IsLinear())); + EXPECT_TRUE((IsLinear())); + EXPECT_TRUE((IsLinear())); +} + +TEST_F(BtreeMapTest, LinearChoiceTree) { + // Cmp has precedence, and is forcing binary + EXPECT_FALSE((IsLinear())); + EXPECT_FALSE((IsLinear())); + EXPECT_FALSE((IsLinear())); + EXPECT_FALSE((IsLinear())); + EXPECT_FALSE((IsLinear())); + // Cmp has precedence, and is forcing linear + EXPECT_TRUE((IsLinear())); + EXPECT_TRUE((IsLinear())); + EXPECT_TRUE((IsLinear())); + EXPECT_TRUE((IsLinear())); + EXPECT_TRUE((IsLinear())); + // Cmp has no preference, Key determines linear vs binary. + EXPECT_FALSE((IsLinear())); + EXPECT_TRUE((IsLinear())); + EXPECT_FALSE((IsLinear())); + // arithmetic key w/ std::less or std::greater: linear + EXPECT_TRUE((IsLinear>())); + EXPECT_TRUE((IsLinear>())); + // arithmetic key w/ custom compare: binary + EXPECT_FALSE((IsLinear())); + // non-arithmetic key: binary + EXPECT_FALSE((IsLinear>())); +} + +TEST(Btree, BtreeMapCanHoldMoveOnlyTypes) { + absl::btree_map> m; + + std::unique_ptr &v = m["A"]; + EXPECT_TRUE(v == nullptr); + v.reset(new std::string("X")); + + auto iter = m.find("A"); + EXPECT_EQ("X", *iter->second); +} + +TEST(Btree, InitializerListConstructor) { + absl::btree_set set({"a", "b"}); + EXPECT_EQ(set.count("a"), 1); + EXPECT_EQ(set.count("b"), 1); + + absl::btree_multiset mset({1, 1, 4}); + EXPECT_EQ(mset.count(1), 2); + EXPECT_EQ(mset.count(4), 1); + + absl::btree_map map({{1, 5}, {2, 10}}); + EXPECT_EQ(map[1], 5); + EXPECT_EQ(map[2], 10); + + absl::btree_multimap mmap({{1, 5}, {1, 10}}); + auto range = mmap.equal_range(1); + auto it = range.first; + ASSERT_NE(it, range.second); + EXPECT_EQ(it->second, 5); + ASSERT_NE(++it, range.second); + EXPECT_EQ(it->second, 10); + EXPECT_EQ(++it, range.second); +} + +TEST(Btree, InitializerListInsert) { + absl::btree_set set; + set.insert({"a", "b"}); + EXPECT_EQ(set.count("a"), 1); + EXPECT_EQ(set.count("b"), 1); + + absl::btree_multiset mset; + mset.insert({1, 1, 4}); + EXPECT_EQ(mset.count(1), 2); + EXPECT_EQ(mset.count(4), 1); + + absl::btree_map map; + map.insert({{1, 5}, {2, 10}}); + // Test that inserting one element using an initializer list also works. 
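+  // ({3, 15} is deduced as a single key/value pair here rather than as a
+  // one-element list of pairs.)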
+ map.insert({3, 15}); + EXPECT_EQ(map[1], 5); + EXPECT_EQ(map[2], 10); + EXPECT_EQ(map[3], 15); + + absl::btree_multimap mmap; + mmap.insert({{1, 5}, {1, 10}}); + auto range = mmap.equal_range(1); + auto it = range.first; + ASSERT_NE(it, range.second); + EXPECT_EQ(it->second, 5); + ASSERT_NE(++it, range.second); + EXPECT_EQ(it->second, 10); + EXPECT_EQ(++it, range.second); +} + +template +void AssertKeyCompareStringAdapted() { + using Adapted = typename key_compare_adapter::type; + static_assert( + std::is_same::value || + std::is_same::value, + "key_compare_adapter should have string-adapted this comparator."); +} +template +void AssertKeyCompareNotStringAdapted() { + using Adapted = typename key_compare_adapter::type; + static_assert( + !std::is_same::value && + !std::is_same::value, + "key_compare_adapter shouldn't have string-adapted this comparator."); +} + +TEST(Btree, KeyCompareAdapter) { + AssertKeyCompareStringAdapted, std::string>(); + AssertKeyCompareStringAdapted, std::string>(); + AssertKeyCompareStringAdapted, + absl::string_view>(); + AssertKeyCompareStringAdapted, + absl::string_view>(); + AssertKeyCompareStringAdapted, absl::Cord>(); + AssertKeyCompareStringAdapted, absl::Cord>(); + AssertKeyCompareNotStringAdapted, int>(); + AssertKeyCompareNotStringAdapted, int>(); +} + +TEST(Btree, RValueInsert) { + InstanceTracker tracker; + + absl::btree_set set; + set.insert(MovableOnlyInstance(1)); + set.insert(MovableOnlyInstance(3)); + MovableOnlyInstance two(2); + set.insert(set.find(MovableOnlyInstance(3)), std::move(two)); + auto it = set.find(MovableOnlyInstance(2)); + ASSERT_NE(it, set.end()); + ASSERT_NE(++it, set.end()); + EXPECT_EQ(it->value(), 3); + + absl::btree_multiset mset; + MovableOnlyInstance zero(0); + MovableOnlyInstance zero2(0); + mset.insert(std::move(zero)); + mset.insert(mset.find(MovableOnlyInstance(0)), std::move(zero2)); + EXPECT_EQ(mset.count(MovableOnlyInstance(0)), 2); + + absl::btree_map map; + std::pair p1 = {1, MovableOnlyInstance(5)}; + std::pair p2 = {2, MovableOnlyInstance(10)}; + std::pair p3 = {3, MovableOnlyInstance(15)}; + map.insert(std::move(p1)); + map.insert(std::move(p3)); + map.insert(map.find(3), std::move(p2)); + ASSERT_NE(map.find(2), map.end()); + EXPECT_EQ(map.find(2)->second.value(), 10); + + absl::btree_multimap mmap; + std::pair p4 = {1, MovableOnlyInstance(5)}; + std::pair p5 = {1, MovableOnlyInstance(10)}; + mmap.insert(std::move(p4)); + mmap.insert(mmap.find(1), std::move(p5)); + auto range = mmap.equal_range(1); + auto it1 = range.first; + ASSERT_NE(it1, range.second); + EXPECT_EQ(it1->second.value(), 10); + ASSERT_NE(++it1, range.second); + EXPECT_EQ(it1->second.value(), 5); + EXPECT_EQ(++it1, range.second); + + EXPECT_EQ(tracker.copies(), 0); + EXPECT_EQ(tracker.swaps(), 0); +} + +template +struct CheckedCompareOptedOutCmp : Cmp, BtreeTestOnlyCheckedCompareOptOutBase { + using Cmp::Cmp; + CheckedCompareOptedOutCmp() {} + CheckedCompareOptedOutCmp(Cmp cmp) : Cmp(std::move(cmp)) {} // NOLINT +}; + +// A btree set with a specific number of values per node. Opt out of +// checked_compare so that we can expect exact numbers of comparisons. 
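+// For example (illustrative): SizedBtreeSet with TargetValuesPerNode == 4
+// behaves like a regular btree set whose leaf nodes hold at most 4 values,
+// which keeps node layout, and therefore move and comparison counts,
+// predictable.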
+template > +class SizedBtreeSet + : public btree_set_container, std::allocator, + BtreeNodePeer::GetTargetNodeSize(TargetValuesPerNode), + /*Multi=*/false>>> { + using Base = typename SizedBtreeSet::btree_set_container; + + public: + SizedBtreeSet() {} + using Base::Base; +}; + +template +void ExpectOperationCounts(const int expected_moves, + const int expected_comparisons, + const std::vector &values, + InstanceTracker *tracker, Set *set) { + for (const int v : values) set->insert(MovableOnlyInstance(v)); + set->clear(); + EXPECT_EQ(tracker->moves(), expected_moves); + EXPECT_EQ(tracker->comparisons(), expected_comparisons); + EXPECT_EQ(tracker->copies(), 0); + EXPECT_EQ(tracker->swaps(), 0); + tracker->ResetCopiesMovesSwaps(); +} + +// Note: when the values in this test change, it is expected to have an impact +// on performance. +TEST(Btree, MovesComparisonsCopiesSwapsTracking) { + InstanceTracker tracker; + // Note: this is minimum number of values per node. + SizedBtreeSet set4; + // Note: this is the default number of values per node for a set of int32s + // (with 64-bit pointers). + SizedBtreeSet set61; + SizedBtreeSet set100; + + // Don't depend on flags for random values because then the expectations will + // fail if the flags change. + std::vector values = + GenerateValuesWithSeed(10000, 1 << 22, /*seed=*/23); + + EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode(), 4); + EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode(), 61); + EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode(), 100); + if (sizeof(void *) == 8) { + EXPECT_EQ( + BtreeNodePeer::GetNumSlotsPerNode>(), + // When we have generations, there is one fewer slot. + BtreeNodePeer::UsesGenerations>() ? 60 : 61); + } + + // Test key insertion/deletion in random order. + ExpectOperationCounts(56540, 134212, values, &tracker, &set4); + ExpectOperationCounts(386718, 129807, values, &tracker, &set61); + ExpectOperationCounts(586761, 130310, values, &tracker, &set100); + + // Test key insertion/deletion in sorted order. + std::sort(values.begin(), values.end()); + ExpectOperationCounts(24972, 85563, values, &tracker, &set4); + ExpectOperationCounts(20208, 87757, values, &tracker, &set61); + ExpectOperationCounts(20124, 96583, values, &tracker, &set100); + + // Test key insertion/deletion in reverse sorted order. + std::reverse(values.begin(), values.end()); + ExpectOperationCounts(54949, 127531, values, &tracker, &set4); + ExpectOperationCounts(338813, 118266, values, &tracker, &set61); + ExpectOperationCounts(534529, 125279, values, &tracker, &set100); +} + +struct MovableOnlyInstanceThreeWayCompare { + absl::weak_ordering operator()(const MovableOnlyInstance &a, + const MovableOnlyInstance &b) const { + return a.compare(b); + } +}; + +// Note: when the values in this test change, it is expected to have an impact +// on performance. +TEST(Btree, MovesComparisonsCopiesSwapsTrackingThreeWayCompare) { + InstanceTracker tracker; + // Note: this is minimum number of values per node. + SizedBtreeSet + set4; + // Note: this is the default number of values per node for a set of int32s + // (with 64-bit pointers). + SizedBtreeSet + set61; + SizedBtreeSet + set100; + + // Don't depend on flags for random values because then the expectations will + // fail if the flags change. 
+ std::vector values = + GenerateValuesWithSeed(10000, 1 << 22, /*seed=*/23); + + EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode(), 4); + EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode(), 61); + EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode(), 100); + if (sizeof(void *) == 8) { + EXPECT_EQ( + BtreeNodePeer::GetNumSlotsPerNode>(), + // When we have generations, there is one fewer slot. + BtreeNodePeer::UsesGenerations>() ? 60 : 61); + } + + // Test key insertion/deletion in random order. + ExpectOperationCounts(56540, 124221, values, &tracker, &set4); + ExpectOperationCounts(386718, 119816, values, &tracker, &set61); + ExpectOperationCounts(586761, 120319, values, &tracker, &set100); + + // Test key insertion/deletion in sorted order. + std::sort(values.begin(), values.end()); + ExpectOperationCounts(24972, 85563, values, &tracker, &set4); + ExpectOperationCounts(20208, 87757, values, &tracker, &set61); + ExpectOperationCounts(20124, 96583, values, &tracker, &set100); + + // Test key insertion/deletion in reverse sorted order. + std::reverse(values.begin(), values.end()); + ExpectOperationCounts(54949, 117532, values, &tracker, &set4); + ExpectOperationCounts(338813, 108267, values, &tracker, &set61); + ExpectOperationCounts(534529, 115280, values, &tracker, &set100); +} + +struct NoDefaultCtor { + int num; + explicit NoDefaultCtor(int i) : num(i) {} + + friend bool operator<(const NoDefaultCtor &a, const NoDefaultCtor &b) { + return a.num < b.num; + } +}; + +TEST(Btree, BtreeMapCanHoldNoDefaultCtorTypes) { + absl::btree_map m; + + for (int i = 1; i <= 99; ++i) { + SCOPED_TRACE(i); + EXPECT_TRUE(m.emplace(NoDefaultCtor(i), NoDefaultCtor(100 - i)).second); + } + EXPECT_FALSE(m.emplace(NoDefaultCtor(78), NoDefaultCtor(0)).second); + + auto iter99 = m.find(NoDefaultCtor(99)); + ASSERT_NE(iter99, m.end()); + EXPECT_EQ(iter99->second.num, 1); + + auto iter1 = m.find(NoDefaultCtor(1)); + ASSERT_NE(iter1, m.end()); + EXPECT_EQ(iter1->second.num, 99); + + auto iter50 = m.find(NoDefaultCtor(50)); + ASSERT_NE(iter50, m.end()); + EXPECT_EQ(iter50->second.num, 50); + + auto iter25 = m.find(NoDefaultCtor(25)); + ASSERT_NE(iter25, m.end()); + EXPECT_EQ(iter25->second.num, 75); +} + +TEST(Btree, BtreeMultimapCanHoldNoDefaultCtorTypes) { + absl::btree_multimap m; + + for (int i = 1; i <= 99; ++i) { + SCOPED_TRACE(i); + m.emplace(NoDefaultCtor(i), NoDefaultCtor(100 - i)); + } + + auto iter99 = m.find(NoDefaultCtor(99)); + ASSERT_NE(iter99, m.end()); + EXPECT_EQ(iter99->second.num, 1); + + auto iter1 = m.find(NoDefaultCtor(1)); + ASSERT_NE(iter1, m.end()); + EXPECT_EQ(iter1->second.num, 99); + + auto iter50 = m.find(NoDefaultCtor(50)); + ASSERT_NE(iter50, m.end()); + EXPECT_EQ(iter50->second.num, 50); + + auto iter25 = m.find(NoDefaultCtor(25)); + ASSERT_NE(iter25, m.end()); + EXPECT_EQ(iter25->second.num, 75); +} + +TEST(Btree, MapAt) { + absl::btree_map map = {{1, 2}, {2, 4}}; + EXPECT_EQ(map.at(1), 2); + EXPECT_EQ(map.at(2), 4); + map.at(2) = 8; + const absl::btree_map &const_map = map; + EXPECT_EQ(const_map.at(1), 2); + EXPECT_EQ(const_map.at(2), 8); +#ifdef ABSL_HAVE_EXCEPTIONS + EXPECT_THROW(map.at(3), std::out_of_range); +#else + EXPECT_DEATH_IF_SUPPORTED(map.at(3), "absl::btree_map::at"); +#endif +} + +TEST(Btree, BtreeMultisetEmplace) { + const int value_to_insert = 123456; + absl::btree_multiset s; + auto iter = s.emplace(value_to_insert); + ASSERT_NE(iter, s.end()); + EXPECT_EQ(*iter, value_to_insert); + auto iter2 = s.emplace(value_to_insert); + EXPECT_NE(iter2, iter); + ASSERT_NE(iter2, s.end()); + 
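+  // Unlike set/map emplace, multiset emplace always inserts, so the duplicate
+  // value gets its own distinct iterator.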
EXPECT_EQ(*iter2, value_to_insert); + auto result = s.equal_range(value_to_insert); + EXPECT_EQ(std::distance(result.first, result.second), 2); +} + +TEST(Btree, BtreeMultisetEmplaceHint) { + const int value_to_insert = 123456; + absl::btree_multiset s; + auto iter = s.emplace(value_to_insert); + ASSERT_NE(iter, s.end()); + EXPECT_EQ(*iter, value_to_insert); + auto emplace_iter = s.emplace_hint(iter, value_to_insert); + EXPECT_NE(emplace_iter, iter); + ASSERT_NE(emplace_iter, s.end()); + EXPECT_EQ(*emplace_iter, value_to_insert); +} + +TEST(Btree, BtreeMultimapEmplace) { + const int key_to_insert = 123456; + const char value0[] = "a"; + absl::btree_multimap s; + auto iter = s.emplace(key_to_insert, value0); + ASSERT_NE(iter, s.end()); + EXPECT_EQ(iter->first, key_to_insert); + EXPECT_EQ(iter->second, value0); + const char value1[] = "b"; + auto iter2 = s.emplace(key_to_insert, value1); + EXPECT_NE(iter2, iter); + ASSERT_NE(iter2, s.end()); + EXPECT_EQ(iter2->first, key_to_insert); + EXPECT_EQ(iter2->second, value1); + auto result = s.equal_range(key_to_insert); + EXPECT_EQ(std::distance(result.first, result.second), 2); +} + +TEST(Btree, BtreeMultimapEmplaceHint) { + const int key_to_insert = 123456; + const char value0[] = "a"; + absl::btree_multimap s; + auto iter = s.emplace(key_to_insert, value0); + ASSERT_NE(iter, s.end()); + EXPECT_EQ(iter->first, key_to_insert); + EXPECT_EQ(iter->second, value0); + const char value1[] = "b"; + auto emplace_iter = s.emplace_hint(iter, key_to_insert, value1); + EXPECT_NE(emplace_iter, iter); + ASSERT_NE(emplace_iter, s.end()); + EXPECT_EQ(emplace_iter->first, key_to_insert); + EXPECT_EQ(emplace_iter->second, value1); +} + +TEST(Btree, ConstIteratorAccessors) { + absl::btree_set set; + for (int i = 0; i < 100; ++i) { + set.insert(i); + } + + auto it = set.cbegin(); + auto r_it = set.crbegin(); + for (int i = 0; i < 100; ++i, ++it, ++r_it) { + ASSERT_EQ(*it, i); + ASSERT_EQ(*r_it, 99 - i); + } + EXPECT_EQ(it, set.cend()); + EXPECT_EQ(r_it, set.crend()); +} + +TEST(Btree, StrSplitCompatible) { + const absl::btree_set split_set = absl::StrSplit("a,b,c", ','); + const absl::btree_set expected_set = {"a", "b", "c"}; + + EXPECT_EQ(split_set, expected_set); +} + +TEST(Btree, KeyComp) { + absl::btree_set s; + EXPECT_TRUE(s.key_comp()(1, 2)); + EXPECT_FALSE(s.key_comp()(2, 2)); + EXPECT_FALSE(s.key_comp()(2, 1)); + + absl::btree_map m1; + EXPECT_TRUE(m1.key_comp()(1, 2)); + EXPECT_FALSE(m1.key_comp()(2, 2)); + EXPECT_FALSE(m1.key_comp()(2, 1)); + + // Even though we internally adapt the comparator of `m2` to be three-way and + // heterogeneous, the comparator we expose through key_comp() is the original + // unadapted comparator. + absl::btree_map m2; + EXPECT_TRUE(m2.key_comp()("a", "b")); + EXPECT_FALSE(m2.key_comp()("b", "b")); + EXPECT_FALSE(m2.key_comp()("b", "a")); +} + +TEST(Btree, ValueComp) { + absl::btree_set s; + EXPECT_TRUE(s.value_comp()(1, 2)); + EXPECT_FALSE(s.value_comp()(2, 2)); + EXPECT_FALSE(s.value_comp()(2, 1)); + + absl::btree_map m1; + EXPECT_TRUE(m1.value_comp()(std::make_pair(1, 0), std::make_pair(2, 0))); + EXPECT_FALSE(m1.value_comp()(std::make_pair(2, 0), std::make_pair(2, 0))); + EXPECT_FALSE(m1.value_comp()(std::make_pair(2, 0), std::make_pair(1, 0))); + + // Even though we internally adapt the comparator of `m2` to be three-way and + // heterogeneous, the comparator we expose through value_comp() is based on + // the original unadapted comparator. 
+ absl::btree_map m2; + EXPECT_TRUE(m2.value_comp()(std::make_pair("a", 0), std::make_pair("b", 0))); + EXPECT_FALSE(m2.value_comp()(std::make_pair("b", 0), std::make_pair("b", 0))); + EXPECT_FALSE(m2.value_comp()(std::make_pair("b", 0), std::make_pair("a", 0))); +} + +TEST(Btree, DefaultConstruction) { + absl::btree_set s; + absl::btree_map m; + absl::btree_multiset ms; + absl::btree_multimap mm; + + EXPECT_TRUE(s.empty()); + EXPECT_TRUE(m.empty()); + EXPECT_TRUE(ms.empty()); + EXPECT_TRUE(mm.empty()); +} + +TEST(Btree, SwissTableHashable) { + static constexpr int kValues = 10000; + std::vector values(kValues); + std::iota(values.begin(), values.end(), 0); + std::vector> map_values; + for (int v : values) map_values.emplace_back(v, -v); + + using set = absl::btree_set; + EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({ + set{}, + set{1}, + set{2}, + set{1, 2}, + set{2, 1}, + set(values.begin(), values.end()), + set(values.rbegin(), values.rend()), + })); + + using mset = absl::btree_multiset; + EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({ + mset{}, + mset{1}, + mset{1, 1}, + mset{2}, + mset{2, 2}, + mset{1, 2}, + mset{1, 1, 2}, + mset{1, 2, 2}, + mset{1, 1, 2, 2}, + mset(values.begin(), values.end()), + mset(values.rbegin(), values.rend()), + })); + + using map = absl::btree_map; + EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({ + map{}, + map{{1, 0}}, + map{{1, 1}}, + map{{2, 0}}, + map{{2, 2}}, + map{{1, 0}, {2, 1}}, + map(map_values.begin(), map_values.end()), + map(map_values.rbegin(), map_values.rend()), + })); + + using mmap = absl::btree_multimap; + EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({ + mmap{}, + mmap{{1, 0}}, + mmap{{1, 1}}, + mmap{{1, 0}, {1, 1}}, + mmap{{1, 1}, {1, 0}}, + mmap{{2, 0}}, + mmap{{2, 2}}, + mmap{{1, 0}, {2, 1}}, + mmap(map_values.begin(), map_values.end()), + mmap(map_values.rbegin(), map_values.rend()), + })); +} + +TEST(Btree, ComparableSet) { + absl::btree_set s1 = {1, 2}; + absl::btree_set s2 = {2, 3}; + EXPECT_LT(s1, s2); + EXPECT_LE(s1, s2); + EXPECT_LE(s1, s1); + EXPECT_GT(s2, s1); + EXPECT_GE(s2, s1); + EXPECT_GE(s1, s1); +} + +TEST(Btree, ComparableSetsDifferentLength) { + absl::btree_set s1 = {1, 2}; + absl::btree_set s2 = {1, 2, 3}; + EXPECT_LT(s1, s2); + EXPECT_LE(s1, s2); + EXPECT_GT(s2, s1); + EXPECT_GE(s2, s1); +} + +TEST(Btree, ComparableMultiset) { + absl::btree_multiset s1 = {1, 2}; + absl::btree_multiset s2 = {2, 3}; + EXPECT_LT(s1, s2); + EXPECT_LE(s1, s2); + EXPECT_LE(s1, s1); + EXPECT_GT(s2, s1); + EXPECT_GE(s2, s1); + EXPECT_GE(s1, s1); +} + +TEST(Btree, ComparableMap) { + absl::btree_map s1 = {{1, 2}}; + absl::btree_map s2 = {{2, 3}}; + EXPECT_LT(s1, s2); + EXPECT_LE(s1, s2); + EXPECT_LE(s1, s1); + EXPECT_GT(s2, s1); + EXPECT_GE(s2, s1); + EXPECT_GE(s1, s1); +} + +TEST(Btree, ComparableMultimap) { + absl::btree_multimap s1 = {{1, 2}}; + absl::btree_multimap s2 = {{2, 3}}; + EXPECT_LT(s1, s2); + EXPECT_LE(s1, s2); + EXPECT_LE(s1, s1); + EXPECT_GT(s2, s1); + EXPECT_GE(s2, s1); + EXPECT_GE(s1, s1); +} + +TEST(Btree, ComparableSetWithCustomComparator) { + // As specified by + // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2012/n3337.pdf section + // [container.requirements.general].12, ordering associative containers always + // uses default '<' operator + // - even if otherwise the container uses custom functor. 
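+  // Worked example: with std::greater the sets below iterate in descending
+  // order ({2, 1} and {3, 2}), and lexicographic comparison with the built-in
+  // `<` on the elements still yields s1 < s2.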
+ absl::btree_set> s1 = {1, 2}; + absl::btree_set> s2 = {2, 3}; + EXPECT_LT(s1, s2); + EXPECT_LE(s1, s2); + EXPECT_LE(s1, s1); + EXPECT_GT(s2, s1); + EXPECT_GE(s2, s1); + EXPECT_GE(s1, s1); +} + +TEST(Btree, EraseReturnsIterator) { + absl::btree_set set = {1, 2, 3, 4, 5}; + auto result_it = set.erase(set.begin(), set.find(3)); + EXPECT_EQ(result_it, set.find(3)); + result_it = set.erase(set.find(5)); + EXPECT_EQ(result_it, set.end()); +} + +TEST(Btree, ExtractAndInsertNodeHandleSet) { + absl::btree_set src1 = {1, 2, 3, 4, 5}; + auto nh = src1.extract(src1.find(3)); + EXPECT_THAT(src1, ElementsAre(1, 2, 4, 5)); + absl::btree_set other; + absl::btree_set::insert_return_type res = other.insert(std::move(nh)); + EXPECT_THAT(other, ElementsAre(3)); + EXPECT_EQ(res.position, other.find(3)); + EXPECT_TRUE(res.inserted); + EXPECT_TRUE(res.node.empty()); + + absl::btree_set src2 = {3, 4}; + nh = src2.extract(src2.find(3)); + EXPECT_THAT(src2, ElementsAre(4)); + res = other.insert(std::move(nh)); + EXPECT_THAT(other, ElementsAre(3)); + EXPECT_EQ(res.position, other.find(3)); + EXPECT_FALSE(res.inserted); + ASSERT_FALSE(res.node.empty()); + EXPECT_EQ(res.node.value(), 3); +} + +template +void TestExtractWithTrackingForSet() { + InstanceTracker tracker; + { + Set s; + // Add enough elements to make sure we test internal nodes too. + const size_t kSize = 1000; + while (s.size() < kSize) { + s.insert(MovableOnlyInstance(s.size())); + } + for (int i = 0; i < kSize; ++i) { + // Extract with key + auto nh = s.extract(MovableOnlyInstance(i)); + EXPECT_EQ(s.size(), kSize - 1); + EXPECT_EQ(nh.value().value(), i); + // Insert with node + s.insert(std::move(nh)); + EXPECT_EQ(s.size(), kSize); + + // Extract with iterator + auto it = s.find(MovableOnlyInstance(i)); + nh = s.extract(it); + EXPECT_EQ(s.size(), kSize - 1); + EXPECT_EQ(nh.value().value(), i); + // Insert with node and hint + s.insert(s.begin(), std::move(nh)); + EXPECT_EQ(s.size(), kSize); + } + } + EXPECT_EQ(0, tracker.instances()); +} + +template +void TestExtractWithTrackingForMap() { + InstanceTracker tracker; + { + Map m; + // Add enough elements to make sure we test internal nodes too. 
+ const size_t kSize = 1000; + while (m.size() < kSize) { + m.insert( + {CopyableMovableInstance(m.size()), MovableOnlyInstance(m.size())}); + } + for (int i = 0; i < kSize; ++i) { + // Extract with key + auto nh = m.extract(CopyableMovableInstance(i)); + EXPECT_EQ(m.size(), kSize - 1); + EXPECT_EQ(nh.key().value(), i); + EXPECT_EQ(nh.mapped().value(), i); + // Insert with node + m.insert(std::move(nh)); + EXPECT_EQ(m.size(), kSize); + + // Extract with iterator + auto it = m.find(CopyableMovableInstance(i)); + nh = m.extract(it); + EXPECT_EQ(m.size(), kSize - 1); + EXPECT_EQ(nh.key().value(), i); + EXPECT_EQ(nh.mapped().value(), i); + // Insert with node and hint + m.insert(m.begin(), std::move(nh)); + EXPECT_EQ(m.size(), kSize); + } + } + EXPECT_EQ(0, tracker.instances()); +} + +TEST(Btree, ExtractTracking) { + TestExtractWithTrackingForSet>(); + TestExtractWithTrackingForSet>(); + TestExtractWithTrackingForMap< + absl::btree_map>(); + TestExtractWithTrackingForMap< + absl::btree_multimap>(); +} + +TEST(Btree, ExtractAndInsertNodeHandleMultiSet) { + absl::btree_multiset src1 = {1, 2, 3, 3, 4, 5}; + auto nh = src1.extract(src1.find(3)); + EXPECT_THAT(src1, ElementsAre(1, 2, 3, 4, 5)); + absl::btree_multiset other; + auto res = other.insert(std::move(nh)); + EXPECT_THAT(other, ElementsAre(3)); + EXPECT_EQ(res, other.find(3)); + + absl::btree_multiset src2 = {3, 4}; + nh = src2.extract(src2.find(3)); + EXPECT_THAT(src2, ElementsAre(4)); + res = other.insert(std::move(nh)); + EXPECT_THAT(other, ElementsAre(3, 3)); + EXPECT_EQ(res, ++other.find(3)); +} + +TEST(Btree, ExtractAndInsertNodeHandleMap) { + absl::btree_map src1 = {{1, 2}, {3, 4}, {5, 6}}; + auto nh = src1.extract(src1.find(3)); + EXPECT_THAT(src1, ElementsAre(Pair(1, 2), Pair(5, 6))); + absl::btree_map other; + absl::btree_map::insert_return_type res = + other.insert(std::move(nh)); + EXPECT_THAT(other, ElementsAre(Pair(3, 4))); + EXPECT_EQ(res.position, other.find(3)); + EXPECT_TRUE(res.inserted); + EXPECT_TRUE(res.node.empty()); + + absl::btree_map src2 = {{3, 6}}; + nh = src2.extract(src2.find(3)); + EXPECT_TRUE(src2.empty()); + res = other.insert(std::move(nh)); + EXPECT_THAT(other, ElementsAre(Pair(3, 4))); + EXPECT_EQ(res.position, other.find(3)); + EXPECT_FALSE(res.inserted); + ASSERT_FALSE(res.node.empty()); + EXPECT_EQ(res.node.key(), 3); + EXPECT_EQ(res.node.mapped(), 6); +} + +TEST(Btree, ExtractAndInsertNodeHandleMultiMap) { + absl::btree_multimap src1 = {{1, 2}, {3, 4}, {5, 6}}; + auto nh = src1.extract(src1.find(3)); + EXPECT_THAT(src1, ElementsAre(Pair(1, 2), Pair(5, 6))); + absl::btree_multimap other; + auto res = other.insert(std::move(nh)); + EXPECT_THAT(other, ElementsAre(Pair(3, 4))); + EXPECT_EQ(res, other.find(3)); + + absl::btree_multimap src2 = {{3, 6}}; + nh = src2.extract(src2.find(3)); + EXPECT_TRUE(src2.empty()); + res = other.insert(std::move(nh)); + EXPECT_THAT(other, ElementsAre(Pair(3, 4), Pair(3, 6))); + EXPECT_EQ(res, ++other.begin()); +} + +TEST(Btree, ExtractMultiMapEquivalentKeys) { + // Note: using string keys means a three-way comparator. 
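+  // (The default comparator for std::string keys is adapted internally to a
+  // three-way, string_view-based comparison, so extract() is exercised on the
+  // three-way code path here.)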
+ absl::btree_multimap map; + for (int i = 0; i < 100; ++i) { + for (int j = 0; j < 100; ++j) { + map.insert({absl::StrCat(i), j}); + } + } + + for (int i = 0; i < 100; ++i) { + const std::string key = absl::StrCat(i); + auto node_handle = map.extract(key); + EXPECT_EQ(node_handle.key(), key); + EXPECT_EQ(node_handle.mapped(), 0) << i; + } + + for (int i = 0; i < 100; ++i) { + const std::string key = absl::StrCat(i); + auto node_handle = map.extract(key); + EXPECT_EQ(node_handle.key(), key); + EXPECT_EQ(node_handle.mapped(), 1) << i; + } +} + +// For multisets, insert with hint also affects correctness because we need to +// insert immediately before the hint if possible. +struct InsertMultiHintData { + int key; + int not_key; + bool operator==(const InsertMultiHintData other) const { + return key == other.key && not_key == other.not_key; + } +}; + +struct InsertMultiHintDataKeyCompare { + using is_transparent = void; + bool operator()(const InsertMultiHintData a, + const InsertMultiHintData b) const { + return a.key < b.key; + } + bool operator()(const int a, const InsertMultiHintData b) const { + return a < b.key; + } + bool operator()(const InsertMultiHintData a, const int b) const { + return a.key < b; + } +}; + +TEST(Btree, InsertHintNodeHandle) { + // For unique sets, insert with hint is just a performance optimization. + // Test that insert works correctly when the hint is right or wrong. + { + absl::btree_set src = {1, 2, 3, 4, 5}; + auto nh = src.extract(src.find(3)); + EXPECT_THAT(src, ElementsAre(1, 2, 4, 5)); + absl::btree_set other = {0, 100}; + // Test a correct hint. + auto it = other.insert(other.lower_bound(3), std::move(nh)); + EXPECT_THAT(other, ElementsAre(0, 3, 100)); + EXPECT_EQ(it, other.find(3)); + + nh = src.extract(src.find(5)); + // Test an incorrect hint. 
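+    // The extracted node holds 5, which belongs before 100, so the end() hint
+    // is wrong and the insert must fall back to a regular lookup.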
+ it = other.insert(other.end(), std::move(nh)); + EXPECT_THAT(other, ElementsAre(0, 3, 5, 100)); + EXPECT_EQ(it, other.find(5)); + } + + absl::btree_multiset src = + {{1, 2}, {3, 4}, {3, 5}}; + auto nh = src.extract(src.lower_bound(3)); + EXPECT_EQ(nh.value(), (InsertMultiHintData{3, 4})); + absl::btree_multiset + other = {{3, 1}, {3, 2}, {3, 3}}; + auto it = other.insert(--other.end(), std::move(nh)); + EXPECT_THAT( + other, ElementsAre(InsertMultiHintData{3, 1}, InsertMultiHintData{3, 2}, + InsertMultiHintData{3, 4}, InsertMultiHintData{3, 3})); + EXPECT_EQ(it, --(--other.end())); + + nh = src.extract(src.find(3)); + EXPECT_EQ(nh.value(), (InsertMultiHintData{3, 5})); + it = other.insert(other.begin(), std::move(nh)); + EXPECT_THAT(other, + ElementsAre(InsertMultiHintData{3, 5}, InsertMultiHintData{3, 1}, + InsertMultiHintData{3, 2}, InsertMultiHintData{3, 4}, + InsertMultiHintData{3, 3})); + EXPECT_EQ(it, other.begin()); +} + +struct IntCompareToCmp { + absl::weak_ordering operator()(int a, int b) const { + if (a < b) return absl::weak_ordering::less; + if (a > b) return absl::weak_ordering::greater; + return absl::weak_ordering::equivalent; + } +}; + +TEST(Btree, MergeIntoUniqueContainers) { + absl::btree_set src1 = {1, 2, 3}; + absl::btree_multiset src2 = {3, 4, 4, 5}; + absl::btree_set dst; + + dst.merge(src1); + EXPECT_TRUE(src1.empty()); + EXPECT_THAT(dst, ElementsAre(1, 2, 3)); + dst.merge(src2); + EXPECT_THAT(src2, ElementsAre(3, 4)); + EXPECT_THAT(dst, ElementsAre(1, 2, 3, 4, 5)); +} + +TEST(Btree, MergeIntoUniqueContainersWithCompareTo) { + absl::btree_set src1 = {1, 2, 3}; + absl::btree_multiset src2 = {3, 4, 4, 5}; + absl::btree_set dst; + + dst.merge(src1); + EXPECT_TRUE(src1.empty()); + EXPECT_THAT(dst, ElementsAre(1, 2, 3)); + dst.merge(src2); + EXPECT_THAT(src2, ElementsAre(3, 4)); + EXPECT_THAT(dst, ElementsAre(1, 2, 3, 4, 5)); +} + +TEST(Btree, MergeIntoMultiContainers) { + absl::btree_set src1 = {1, 2, 3}; + absl::btree_multiset src2 = {3, 4, 4, 5}; + absl::btree_multiset dst; + + dst.merge(src1); + EXPECT_TRUE(src1.empty()); + EXPECT_THAT(dst, ElementsAre(1, 2, 3)); + dst.merge(src2); + EXPECT_TRUE(src2.empty()); + EXPECT_THAT(dst, ElementsAre(1, 2, 3, 3, 4, 4, 5)); +} + +TEST(Btree, MergeIntoMultiContainersWithCompareTo) { + absl::btree_set src1 = {1, 2, 3}; + absl::btree_multiset src2 = {3, 4, 4, 5}; + absl::btree_multiset dst; + + dst.merge(src1); + EXPECT_TRUE(src1.empty()); + EXPECT_THAT(dst, ElementsAre(1, 2, 3)); + dst.merge(src2); + EXPECT_TRUE(src2.empty()); + EXPECT_THAT(dst, ElementsAre(1, 2, 3, 3, 4, 4, 5)); +} + +TEST(Btree, MergeIntoMultiMapsWithDifferentComparators) { + absl::btree_map src1 = {{1, 1}, {2, 2}, {3, 3}}; + absl::btree_multimap> src2 = { + {5, 5}, {4, 1}, {4, 4}, {3, 2}}; + absl::btree_multimap dst; + + dst.merge(src1); + EXPECT_TRUE(src1.empty()); + EXPECT_THAT(dst, ElementsAre(Pair(1, 1), Pair(2, 2), Pair(3, 3))); + dst.merge(src2); + EXPECT_TRUE(src2.empty()); + EXPECT_THAT(dst, ElementsAre(Pair(1, 1), Pair(2, 2), Pair(3, 3), Pair(3, 2), + Pair(4, 1), Pair(4, 4), Pair(5, 5))); +} + +TEST(Btree, MergeIntoSetMovableOnly) { + absl::btree_set src; + src.insert(MovableOnlyInstance(1)); + absl::btree_multiset dst1; + dst1.insert(MovableOnlyInstance(2)); + absl::btree_set dst2; + + // Test merge into multiset. + dst1.merge(src); + + EXPECT_TRUE(src.empty()); + // ElementsAre/ElementsAreArray don't work with move-only types. 
+ ASSERT_THAT(dst1, SizeIs(2)); + EXPECT_EQ(*dst1.begin(), MovableOnlyInstance(1)); + EXPECT_EQ(*std::next(dst1.begin()), MovableOnlyInstance(2)); + + // Test merge into set. + dst2.merge(dst1); + + EXPECT_TRUE(dst1.empty()); + ASSERT_THAT(dst2, SizeIs(2)); + EXPECT_EQ(*dst2.begin(), MovableOnlyInstance(1)); + EXPECT_EQ(*std::next(dst2.begin()), MovableOnlyInstance(2)); +} + +struct KeyCompareToWeakOrdering { + template + absl::weak_ordering operator()(const T &a, const T &b) const { + return a < b ? absl::weak_ordering::less + : a == b ? absl::weak_ordering::equivalent + : absl::weak_ordering::greater; + } +}; + +struct KeyCompareToStrongOrdering { + template + absl::strong_ordering operator()(const T &a, const T &b) const { + return a < b ? absl::strong_ordering::less + : a == b ? absl::strong_ordering::equal + : absl::strong_ordering::greater; + } +}; + +TEST(Btree, UserProvidedKeyCompareToComparators) { + absl::btree_set weak_set = {1, 2, 3}; + EXPECT_TRUE(weak_set.contains(2)); + EXPECT_FALSE(weak_set.contains(4)); + + absl::btree_set strong_set = {1, 2, 3}; + EXPECT_TRUE(strong_set.contains(2)); + EXPECT_FALSE(strong_set.contains(4)); +} + +TEST(Btree, TryEmplaceBasicTest) { + absl::btree_map m; + + // Should construct a string from the literal. + m.try_emplace(1, "one"); + EXPECT_EQ(1, m.size()); + + // Try other string constructors and const lvalue key. + const int key(42); + m.try_emplace(key, 3, 'a'); + m.try_emplace(2, std::string("two")); + + EXPECT_TRUE(std::is_sorted(m.begin(), m.end())); + EXPECT_THAT(m, ElementsAreArray(std::vector>{ + {1, "one"}, {2, "two"}, {42, "aaa"}})); +} + +TEST(Btree, TryEmplaceWithHintWorks) { + // Use a counting comparator here to verify that hint is used. + int calls = 0; + auto cmp = [&calls](int x, int y) { + ++calls; + return x < y; + }; + using Cmp = decltype(cmp); + + // Use a map that is opted out of key_compare being adapted so we can expect + // strict comparison call limits. + absl::btree_map> m(cmp); + for (int i = 0; i < 128; ++i) { + m.emplace(i, i); + } + + // Sanity check for the comparator + calls = 0; + m.emplace(127, 127); + EXPECT_GE(calls, 4); + + // Try with begin hint: + calls = 0; + auto it = m.try_emplace(m.begin(), -1, -1); + EXPECT_EQ(129, m.size()); + EXPECT_EQ(it, m.begin()); + EXPECT_LE(calls, 2); + + // Try with end hint: + calls = 0; + std::pair pair1024 = {1024, 1024}; + it = m.try_emplace(m.end(), pair1024.first, pair1024.second); + EXPECT_EQ(130, m.size()); + EXPECT_EQ(it, --m.end()); + EXPECT_LE(calls, 2); + + // Try value already present, bad hint; ensure no duplicate added: + calls = 0; + it = m.try_emplace(m.end(), 16, 17); + EXPECT_EQ(130, m.size()); + EXPECT_GE(calls, 4); + EXPECT_EQ(it, m.find(16)); + + // Try value already present, hint points directly to it: + calls = 0; + it = m.try_emplace(it, 16, 17); + EXPECT_EQ(130, m.size()); + EXPECT_LE(calls, 2); + EXPECT_EQ(it, m.find(16)); + + m.erase(2); + EXPECT_EQ(129, m.size()); + auto hint = m.find(3); + // Try emplace in the middle of two other elements. 
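+  // The hint (the iterator to 3) is the element immediately after where 2
+  // belongs, so the emplace below should need at most two comparisons.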
+ calls = 0; + m.try_emplace(hint, 2, 2); + EXPECT_EQ(130, m.size()); + EXPECT_LE(calls, 2); + + EXPECT_TRUE(std::is_sorted(m.begin(), m.end())); +} + +TEST(Btree, TryEmplaceWithBadHint) { + absl::btree_map m = {{1, 1}, {9, 9}}; + + // Bad hint (too small), should still emplace: + auto it = m.try_emplace(m.begin(), 2, 2); + EXPECT_EQ(it, ++m.begin()); + EXPECT_THAT(m, ElementsAreArray( + std::vector>{{1, 1}, {2, 2}, {9, 9}})); + + // Bad hint, too large this time: + it = m.try_emplace(++(++m.begin()), 0, 0); + EXPECT_EQ(it, m.begin()); + EXPECT_THAT(m, ElementsAreArray(std::vector>{ + {0, 0}, {1, 1}, {2, 2}, {9, 9}})); +} + +TEST(Btree, TryEmplaceMaintainsSortedOrder) { + absl::btree_map m; + std::pair pair5 = {5, "five"}; + + // Test both lvalue & rvalue emplace. + m.try_emplace(10, "ten"); + m.try_emplace(pair5.first, pair5.second); + EXPECT_EQ(2, m.size()); + EXPECT_TRUE(std::is_sorted(m.begin(), m.end())); + + int int100{100}; + m.try_emplace(int100, "hundred"); + m.try_emplace(1, "one"); + EXPECT_EQ(4, m.size()); + EXPECT_TRUE(std::is_sorted(m.begin(), m.end())); +} + +TEST(Btree, TryEmplaceWithHintAndNoValueArgsWorks) { + absl::btree_map m; + m.try_emplace(m.end(), 1); + EXPECT_EQ(0, m[1]); +} + +TEST(Btree, TryEmplaceWithHintAndMultipleValueArgsWorks) { + absl::btree_map m; + m.try_emplace(m.end(), 1, 10, 'a'); + EXPECT_EQ(std::string(10, 'a'), m[1]); +} + +TEST(Btree, MoveAssignmentAllocatorPropagation) { + InstanceTracker tracker; + + int64_t bytes1 = 0, bytes2 = 0; + PropagatingCountingAlloc allocator1(&bytes1); + PropagatingCountingAlloc allocator2(&bytes2); + std::less cmp; + + // Test propagating allocator_type. + { + absl::btree_set, + PropagatingCountingAlloc> + set1(cmp, allocator1), set2(cmp, allocator2); + + for (int i = 0; i < 100; ++i) set1.insert(MovableOnlyInstance(i)); + + tracker.ResetCopiesMovesSwaps(); + set2 = std::move(set1); + EXPECT_EQ(tracker.moves(), 0); + } + // Test non-propagating allocator_type with equal allocators. + { + absl::btree_set, + CountingAllocator> + set1(cmp, allocator1), set2(cmp, allocator1); + + for (int i = 0; i < 100; ++i) set1.insert(MovableOnlyInstance(i)); + + tracker.ResetCopiesMovesSwaps(); + set2 = std::move(set1); + EXPECT_EQ(tracker.moves(), 0); + } + // Test non-propagating allocator_type with different allocators. + { + absl::btree_set, + CountingAllocator> + set1(cmp, allocator1), set2(cmp, allocator2); + + for (int i = 0; i < 100; ++i) set1.insert(MovableOnlyInstance(i)); + + tracker.ResetCopiesMovesSwaps(); + set2 = std::move(set1); + EXPECT_GE(tracker.moves(), 100); + } +} + +TEST(Btree, EmptyTree) { + absl::btree_set s; + EXPECT_TRUE(s.empty()); + EXPECT_EQ(s.size(), 0); + EXPECT_GT(s.max_size(), 0); +} + +bool IsEven(int k) { return k % 2 == 0; } + +TEST(Btree, EraseIf) { + // Test that erase_if works with all the container types and supports lambdas. 
+ { + absl::btree_set s = {1, 3, 5, 6, 100}; + EXPECT_EQ(erase_if(s, [](int k) { return k > 3; }), 3); + EXPECT_THAT(s, ElementsAre(1, 3)); + } + { + absl::btree_multiset s = {1, 3, 3, 5, 6, 6, 100}; + EXPECT_EQ(erase_if(s, [](int k) { return k <= 3; }), 3); + EXPECT_THAT(s, ElementsAre(5, 6, 6, 100)); + } + { + absl::btree_map m = {{1, 1}, {3, 3}, {6, 6}, {100, 100}}; + EXPECT_EQ( + erase_if(m, [](std::pair kv) { return kv.first > 3; }), + 2); + EXPECT_THAT(m, ElementsAre(Pair(1, 1), Pair(3, 3))); + } + { + absl::btree_multimap m = {{1, 1}, {3, 3}, {3, 6}, + {6, 6}, {6, 7}, {100, 6}}; + EXPECT_EQ( + erase_if(m, + [](std::pair kv) { return kv.second == 6; }), + 3); + EXPECT_THAT(m, ElementsAre(Pair(1, 1), Pair(3, 3), Pair(6, 7))); + } + // Test that erasing all elements from a large set works and test support for + // function pointers. + { + absl::btree_set s; + for (int i = 0; i < 1000; ++i) s.insert(2 * i); + EXPECT_EQ(erase_if(s, IsEven), 1000); + EXPECT_THAT(s, IsEmpty()); + } + // Test that erase_if supports other format of function pointers. + { + absl::btree_set s = {1, 3, 5, 6, 100}; + EXPECT_EQ(erase_if(s, &IsEven), 2); + EXPECT_THAT(s, ElementsAre(1, 3, 5)); + } + // Test that erase_if invokes the predicate once per element. + { + absl::btree_set s; + for (int i = 0; i < 1000; ++i) s.insert(i); + int pred_calls = 0; + EXPECT_EQ(erase_if(s, + [&pred_calls](int k) { + ++pred_calls; + return k % 2; + }), + 500); + EXPECT_THAT(s, SizeIs(500)); + EXPECT_EQ(pred_calls, 1000); + } +} + +TEST(Btree, InsertOrAssign) { + absl::btree_map m = {{1, 1}, {3, 3}}; + using value_type = typename decltype(m)::value_type; + + auto ret = m.insert_or_assign(4, 4); + EXPECT_EQ(*ret.first, value_type(4, 4)); + EXPECT_TRUE(ret.second); + ret = m.insert_or_assign(3, 100); + EXPECT_EQ(*ret.first, value_type(3, 100)); + EXPECT_FALSE(ret.second); + + auto hint_ret = m.insert_or_assign(ret.first, 3, 200); + EXPECT_EQ(*hint_ret, value_type(3, 200)); + hint_ret = m.insert_or_assign(m.find(1), 0, 1); + EXPECT_EQ(*hint_ret, value_type(0, 1)); + // Test with bad hint. 
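+  // m.end() is a poor hint for key -1, which belongs at the front of the map;
+  // insert_or_assign must still place it correctly.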
+ hint_ret = m.insert_or_assign(m.end(), -1, 1); + EXPECT_EQ(*hint_ret, value_type(-1, 1)); + + EXPECT_THAT(m, ElementsAre(Pair(-1, 1), Pair(0, 1), Pair(1, 1), Pair(3, 200), + Pair(4, 4))); +} + +TEST(Btree, InsertOrAssignMovableOnly) { + absl::btree_map m; + using value_type = typename decltype(m)::value_type; + + auto ret = m.insert_or_assign(4, MovableOnlyInstance(4)); + EXPECT_EQ(*ret.first, value_type(4, MovableOnlyInstance(4))); + EXPECT_TRUE(ret.second); + ret = m.insert_or_assign(4, MovableOnlyInstance(100)); + EXPECT_EQ(*ret.first, value_type(4, MovableOnlyInstance(100))); + EXPECT_FALSE(ret.second); + + auto hint_ret = m.insert_or_assign(ret.first, 3, MovableOnlyInstance(200)); + EXPECT_EQ(*hint_ret, value_type(3, MovableOnlyInstance(200))); + + EXPECT_EQ(m.size(), 2); +} + +TEST(Btree, BitfieldArgument) { + union { + int n : 1; + }; + n = 0; + absl::btree_map m; + m.erase(n); + m.count(n); + m.find(n); + m.contains(n); + m.equal_range(n); + m.insert_or_assign(n, n); + m.insert_or_assign(m.end(), n, n); + m.try_emplace(n); + m.try_emplace(m.end(), n); + m.at(n); + m[n]; +} + +TEST(Btree, SetRangeConstructorAndInsertSupportExplicitConversionComparable) { + const absl::string_view names[] = {"n1", "n2"}; + + absl::btree_set name_set1{std::begin(names), std::end(names)}; + EXPECT_THAT(name_set1, ElementsAreArray(names)); + + absl::btree_set name_set2; + name_set2.insert(std::begin(names), std::end(names)); + EXPECT_THAT(name_set2, ElementsAreArray(names)); +} + +// A type that is explicitly convertible from int and counts constructor calls. +struct ConstructorCounted { + explicit ConstructorCounted(int i) : i(i) { ++constructor_calls; } + bool operator==(int other) const { return i == other; } + + int i; + static int constructor_calls; +}; +int ConstructorCounted::constructor_calls = 0; + +struct ConstructorCountedCompare { + bool operator()(int a, const ConstructorCounted &b) const { return a < b.i; } + bool operator()(const ConstructorCounted &a, int b) const { return a.i < b; } + bool operator()(const ConstructorCounted &a, + const ConstructorCounted &b) const { + return a.i < b.i; + } + using is_transparent = void; +}; + +TEST(Btree, + SetRangeConstructorAndInsertExplicitConvComparableLimitConstruction) { + const int i[] = {0, 1, 1}; + ConstructorCounted::constructor_calls = 0; + + absl::btree_set set{ + std::begin(i), std::end(i)}; + EXPECT_THAT(set, ElementsAre(0, 1)); + EXPECT_EQ(ConstructorCounted::constructor_calls, 2); + + set.insert(std::begin(i), std::end(i)); + EXPECT_THAT(set, ElementsAre(0, 1)); + EXPECT_EQ(ConstructorCounted::constructor_calls, 2); +} + +TEST(Btree, + SetRangeConstructorAndInsertSupportExplicitConversionNonComparable) { + const int i[] = {0, 1}; + + absl::btree_set> s1{std::begin(i), std::end(i)}; + EXPECT_THAT(s1, ElementsAre(IsEmpty(), ElementsAre(IsNull()))); + + absl::btree_set> s2; + s2.insert(std::begin(i), std::end(i)); + EXPECT_THAT(s2, ElementsAre(IsEmpty(), ElementsAre(IsNull()))); +} + +// libstdc++ included with GCC 4.9 has a bug in the std::pair constructors that +// prevents explicit conversions between pair types. +// We only run this test for the libstdc++ from GCC 7 or newer because we can't +// reliably check the libstdc++ version prior to that release. 
+#if !defined(__GLIBCXX__) || \ + (defined(_GLIBCXX_RELEASE) && _GLIBCXX_RELEASE >= 7) +TEST(Btree, MapRangeConstructorAndInsertSupportExplicitConversionComparable) { + const std::pair names[] = {{"n1", 1}, {"n2", 2}}; + + absl::btree_map name_map1{std::begin(names), + std::end(names)}; + EXPECT_THAT(name_map1, ElementsAre(Pair("n1", 1), Pair("n2", 2))); + + absl::btree_map name_map2; + name_map2.insert(std::begin(names), std::end(names)); + EXPECT_THAT(name_map2, ElementsAre(Pair("n1", 1), Pair("n2", 2))); +} + +TEST(Btree, + MapRangeConstructorAndInsertExplicitConvComparableLimitConstruction) { + const std::pair i[] = {{0, 1}, {1, 2}, {1, 3}}; + ConstructorCounted::constructor_calls = 0; + + absl::btree_map map{ + std::begin(i), std::end(i)}; + EXPECT_THAT(map, ElementsAre(Pair(0, 1), Pair(1, 2))); + EXPECT_EQ(ConstructorCounted::constructor_calls, 2); + + map.insert(std::begin(i), std::end(i)); + EXPECT_THAT(map, ElementsAre(Pair(0, 1), Pair(1, 2))); + EXPECT_EQ(ConstructorCounted::constructor_calls, 2); +} + +TEST(Btree, + MapRangeConstructorAndInsertSupportExplicitConversionNonComparable) { + const std::pair i[] = {{0, 1}, {1, 2}}; + + absl::btree_map, int> m1{std::begin(i), std::end(i)}; + EXPECT_THAT(m1, + ElementsAre(Pair(IsEmpty(), 1), Pair(ElementsAre(IsNull()), 2))); + + absl::btree_map, int> m2; + m2.insert(std::begin(i), std::end(i)); + EXPECT_THAT(m2, + ElementsAre(Pair(IsEmpty(), 1), Pair(ElementsAre(IsNull()), 2))); +} + +TEST(Btree, HeterogeneousTryEmplace) { + absl::btree_map m; + std::string s = "key"; + absl::string_view sv = s; + m.try_emplace(sv, 1); + EXPECT_EQ(m[s], 1); + + m.try_emplace(m.end(), sv, 2); + EXPECT_EQ(m[s], 1); +} + +TEST(Btree, HeterogeneousOperatorMapped) { + absl::btree_map m; + std::string s = "key"; + absl::string_view sv = s; + m[sv] = 1; + EXPECT_EQ(m[s], 1); + + m[sv] = 2; + EXPECT_EQ(m[s], 2); +} + +TEST(Btree, HeterogeneousInsertOrAssign) { + absl::btree_map m; + std::string s = "key"; + absl::string_view sv = s; + m.insert_or_assign(sv, 1); + EXPECT_EQ(m[s], 1); + + m.insert_or_assign(m.end(), sv, 2); + EXPECT_EQ(m[s], 2); +} +#endif + +// This test requires std::launder for mutable key access in node handles. +#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606 +TEST(Btree, NodeHandleMutableKeyAccess) { + { + absl::btree_map map; + + map["key1"] = "mapped"; + + auto nh = map.extract(map.begin()); + nh.key().resize(3); + map.insert(std::move(nh)); + + EXPECT_THAT(map, ElementsAre(Pair("key", "mapped"))); + } + // Also for multimap. + { + absl::btree_multimap map; + + map.emplace("key1", "mapped"); + + auto nh = map.extract(map.begin()); + nh.key().resize(3); + map.insert(std::move(nh)); + + EXPECT_THAT(map, ElementsAre(Pair("key", "mapped"))); + } +} +#endif + +struct MultiKey { + int i1; + int i2; +}; + +bool operator==(const MultiKey a, const MultiKey b) { + return a.i1 == b.i1 && a.i2 == b.i2; +} + +// A heterogeneous comparator that has different equivalence classes for +// different lookup types. +struct MultiKeyComp { + using is_transparent = void; + bool operator()(const MultiKey a, const MultiKey b) const { + if (a.i1 != b.i1) return a.i1 < b.i1; + return a.i2 < b.i2; + } + bool operator()(const int a, const MultiKey b) const { return a < b.i1; } + bool operator()(const MultiKey a, const int b) const { return a.i1 < b; } +}; + +// A heterogeneous, three-way comparator that has different equivalence classes +// for different lookup types. 
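+// For example, MultiKey{2, 7} and MultiKey{2, 9} are distinct keys under the
+// MultiKey/MultiKey overload but are both equivalent to the int lookup key 2.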
+struct MultiKeyThreeWayComp { + using is_transparent = void; + absl::weak_ordering operator()(const MultiKey a, const MultiKey b) const { + if (a.i1 < b.i1) return absl::weak_ordering::less; + if (a.i1 > b.i1) return absl::weak_ordering::greater; + if (a.i2 < b.i2) return absl::weak_ordering::less; + if (a.i2 > b.i2) return absl::weak_ordering::greater; + return absl::weak_ordering::equivalent; + } + absl::weak_ordering operator()(const int a, const MultiKey b) const { + if (a < b.i1) return absl::weak_ordering::less; + if (a > b.i1) return absl::weak_ordering::greater; + return absl::weak_ordering::equivalent; + } + absl::weak_ordering operator()(const MultiKey a, const int b) const { + if (a.i1 < b) return absl::weak_ordering::less; + if (a.i1 > b) return absl::weak_ordering::greater; + return absl::weak_ordering::equivalent; + } +}; + +template +class BtreeMultiKeyTest : public ::testing::Test {}; +using MultiKeyComps = ::testing::Types; +TYPED_TEST_SUITE(BtreeMultiKeyTest, MultiKeyComps); + +TYPED_TEST(BtreeMultiKeyTest, EqualRange) { + absl::btree_set set; + for (int i = 0; i < 100; ++i) { + for (int j = 0; j < 100; ++j) { + set.insert({i, j}); + } + } + + for (int i = 0; i < 100; ++i) { + auto equal_range = set.equal_range(i); + EXPECT_EQ(equal_range.first->i1, i); + EXPECT_EQ(equal_range.first->i2, 0) << i; + EXPECT_EQ(std::distance(equal_range.first, equal_range.second), 100) << i; + } +} + +TYPED_TEST(BtreeMultiKeyTest, Extract) { + absl::btree_set set; + for (int i = 0; i < 100; ++i) { + for (int j = 0; j < 100; ++j) { + set.insert({i, j}); + } + } + + for (int i = 0; i < 100; ++i) { + auto node_handle = set.extract(i); + EXPECT_EQ(node_handle.value().i1, i); + EXPECT_EQ(node_handle.value().i2, 0) << i; + } + + for (int i = 0; i < 100; ++i) { + auto node_handle = set.extract(i); + EXPECT_EQ(node_handle.value().i1, i); + EXPECT_EQ(node_handle.value().i2, 1) << i; + } +} + +TYPED_TEST(BtreeMultiKeyTest, Erase) { + absl::btree_set set = { + {1, 1}, {2, 1}, {2, 2}, {3, 1}}; + EXPECT_EQ(set.erase(2), 2); + EXPECT_THAT(set, ElementsAre(MultiKey{1, 1}, MultiKey{3, 1})); +} + +TYPED_TEST(BtreeMultiKeyTest, Count) { + const absl::btree_set set = { + {1, 1}, {2, 1}, {2, 2}, {3, 1}}; + EXPECT_EQ(set.count(2), 2); +} + +TEST(Btree, AllocConstructor) { + using Alloc = CountingAllocator; + using Set = absl::btree_set, Alloc>; + int64_t bytes_used = 0; + Alloc alloc(&bytes_used); + Set set(alloc); + + set.insert({1, 2, 3}); + + EXPECT_THAT(set, ElementsAre(1, 2, 3)); + EXPECT_GT(bytes_used, set.size() * sizeof(int)); +} + +TEST(Btree, AllocInitializerListConstructor) { + using Alloc = CountingAllocator; + using Set = absl::btree_set, Alloc>; + int64_t bytes_used = 0; + Alloc alloc(&bytes_used); + Set set({1, 2, 3}, alloc); + + EXPECT_THAT(set, ElementsAre(1, 2, 3)); + EXPECT_GT(bytes_used, set.size() * sizeof(int)); +} + +TEST(Btree, AllocRangeConstructor) { + using Alloc = CountingAllocator; + using Set = absl::btree_set, Alloc>; + int64_t bytes_used = 0; + Alloc alloc(&bytes_used); + std::vector v = {1, 2, 3}; + Set set(v.begin(), v.end(), alloc); + + EXPECT_THAT(set, ElementsAre(1, 2, 3)); + EXPECT_GT(bytes_used, set.size() * sizeof(int)); +} + +TEST(Btree, AllocCopyConstructor) { + using Alloc = CountingAllocator; + using Set = absl::btree_set, Alloc>; + int64_t bytes_used1 = 0; + Alloc alloc1(&bytes_used1); + Set set1(alloc1); + + set1.insert({1, 2, 3}); + + int64_t bytes_used2 = 0; + Alloc alloc2(&bytes_used2); + Set set2(set1, alloc2); + + EXPECT_THAT(set1, ElementsAre(1, 2, 3)); + 
EXPECT_THAT(set2, ElementsAre(1, 2, 3)); + EXPECT_GT(bytes_used1, set1.size() * sizeof(int)); + EXPECT_EQ(bytes_used1, bytes_used2); +} + +TEST(Btree, AllocMoveConstructor_SameAlloc) { + using Alloc = CountingAllocator; + using Set = absl::btree_set, Alloc>; + int64_t bytes_used = 0; + Alloc alloc(&bytes_used); + Set set1(alloc); + + set1.insert({1, 2, 3}); + + const int64_t original_bytes_used = bytes_used; + EXPECT_GT(original_bytes_used, set1.size() * sizeof(int)); + + Set set2(std::move(set1), alloc); + + EXPECT_THAT(set2, ElementsAre(1, 2, 3)); + EXPECT_EQ(bytes_used, original_bytes_used); +} + +TEST(Btree, AllocMoveConstructor_DifferentAlloc) { + using Alloc = CountingAllocator; + using Set = absl::btree_set, Alloc>; + int64_t bytes_used1 = 0; + Alloc alloc1(&bytes_used1); + Set set1(alloc1); + + set1.insert({1, 2, 3}); + + const int64_t original_bytes_used = bytes_used1; + EXPECT_GT(original_bytes_used, set1.size() * sizeof(int)); + + int64_t bytes_used2 = 0; + Alloc alloc2(&bytes_used2); + Set set2(std::move(set1), alloc2); + + EXPECT_THAT(set2, ElementsAre(1, 2, 3)); + // We didn't free these bytes allocated by `set1` yet. + EXPECT_EQ(bytes_used1, original_bytes_used); + EXPECT_EQ(bytes_used2, original_bytes_used); +} + +bool IntCmp(const int a, const int b) { return a < b; } + +TEST(Btree, SupportsFunctionPtrComparator) { + absl::btree_set set(IntCmp); + set.insert({1, 2, 3}); + EXPECT_THAT(set, ElementsAre(1, 2, 3)); + EXPECT_TRUE(set.key_comp()(1, 2)); + EXPECT_TRUE(set.value_comp()(1, 2)); + + absl::btree_map map(&IntCmp); + map[1] = 1; + EXPECT_THAT(map, ElementsAre(Pair(1, 1))); + EXPECT_TRUE(map.key_comp()(1, 2)); + EXPECT_TRUE(map.value_comp()(std::make_pair(1, 1), std::make_pair(2, 2))); +} + +template +struct TransparentPassThroughComp { + using is_transparent = void; + + // This will fail compilation if we attempt a comparison that Compare does not + // support, and the failure will happen inside the function implementation so + // it can't be avoided by using SFINAE on this comparator. + template + bool operator()(const T &lhs, const U &rhs) const { + return Compare()(lhs, rhs); + } +}; + +TEST(Btree, + SupportsTransparentComparatorThatDoesNotImplementAllVisibleOperators) { + absl::btree_set> set; + set.insert(MultiKey{1, 2}); + EXPECT_TRUE(set.contains(1)); +} + +TEST(Btree, ConstructImplicitlyWithUnadaptedComparator) { + absl::btree_set set = {{}, MultiKeyComp{}}; +} + +#ifndef NDEBUG +TEST(Btree, InvalidComparatorsCaught) { + { + struct ZeroAlwaysLessCmp { + bool operator()(int lhs, int rhs) const { + if (lhs == 0) return true; + return lhs < rhs; + } + }; + absl::btree_set set; + EXPECT_DEATH(set.insert({0, 1, 2}), "is_self_equivalent"); + } + { + struct ThreeWayAlwaysLessCmp { + absl::weak_ordering operator()(int, int) const { + return absl::weak_ordering::less; + } + }; + absl::btree_set set; + EXPECT_DEATH(set.insert({0, 1, 2}), "is_self_equivalent"); + } + { + struct SumGreaterZeroCmp { + bool operator()(int lhs, int rhs) const { + // First, do equivalence correctly - so we can test later condition. + if (lhs == rhs) return false; + return lhs + rhs > 0; + } + }; + absl::btree_set set; + // Note: '!' only needs to be escaped when it's the first character. + EXPECT_DEATH(set.insert({0, 1, 2}), + R"regex(\!lhs_comp_rhs \|\| !comp\(\)\(rhs, lhs\))regex"); + } + { + struct ThreeWaySumGreaterZeroCmp { + absl::weak_ordering operator()(int lhs, int rhs) const { + // First, do equivalence correctly - so we can test later condition. 
+ if (lhs == rhs) return absl::weak_ordering::equivalent; + + if (lhs + rhs > 0) return absl::weak_ordering::less; + if (lhs + rhs == 0) return absl::weak_ordering::equivalent; + return absl::weak_ordering::greater; + } + }; + absl::btree_set set; + EXPECT_DEATH(set.insert({0, 1, 2}), "lhs_comp_rhs < 0 -> rhs_comp_lhs > 0"); + } +} +#endif + +#ifndef _MSC_VER +// This test crashes on MSVC. +TEST(Btree, InvalidIteratorUse) { + if (!BtreeNodePeer::UsesGenerations>()) + GTEST_SKIP() << "Generation validation for iterators is disabled."; + + { + absl::btree_set set; + for (int i = 0; i < 10; ++i) set.insert(i); + auto it = set.begin(); + set.erase(it++); + EXPECT_DEATH(set.erase(it++), "invalidated iterator"); + } + { + absl::btree_set set; + for (int i = 0; i < 10; ++i) set.insert(i); + auto it = set.insert(20).first; + set.insert(30); + EXPECT_DEATH(*it, "invalidated iterator"); + } + { + absl::btree_set set; + for (int i = 0; i < 10000; ++i) set.insert(i); + auto it = set.find(5000); + ASSERT_NE(it, set.end()); + set.erase(1); + EXPECT_DEATH(*it, "invalidated iterator"); + } +} +#endif + +} // namespace +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/TMessagesProj/jni/voip/webrtc/absl/container/btree_test.h b/TMessagesProj/jni/voip/webrtc/absl/container/btree_test.h new file mode 100644 index 000000000..624908072 --- /dev/null +++ b/TMessagesProj/jni/voip/webrtc/absl/container/btree_test.h @@ -0,0 +1,166 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CONTAINER_BTREE_TEST_H_ +#define ABSL_CONTAINER_BTREE_TEST_H_ + +#include +#include +#include +#include +#include +#include + +#include "absl/container/btree_map.h" +#include "absl/container/btree_set.h" +#include "absl/container/flat_hash_set.h" +#include "absl/strings/cord.h" +#include "absl/time/time.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +// Like remove_const but propagates the removal through std::pair. +template +struct remove_pair_const { + using type = typename std::remove_const::type; +}; +template +struct remove_pair_const > { + using type = std::pair::type, + typename remove_pair_const::type>; +}; + +// Utility class to provide an accessor for a key given a value. The default +// behavior is to treat the value as a pair and return the first element. +template +struct KeyOfValue { + struct type { + const K& operator()(const V& p) const { return p.first; } + }; +}; + +// Partial specialization of KeyOfValue class for when the key and value are +// the same type such as in set<> and btree_set<>. +template +struct KeyOfValue { + struct type { + const K& operator()(const K& k) const { return k; } + }; +}; + +inline char* GenerateDigits(char buf[16], unsigned val, unsigned maxval) { + assert(val <= maxval); + constexpr unsigned kBase = 64; // avoid integer division. 
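+  // Digits are written in base 64 starting at the character ' ', padded to
+  // the width of maxval, so lexicographic order of the generated strings
+  // matches numeric order of the values.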
+ unsigned p = 15; + buf[p--] = 0; + while (maxval > 0) { + buf[p--] = ' ' + (val % kBase); + val /= kBase; + maxval /= kBase; + } + return buf + p + 1; +} + +template +struct Generator { + int maxval; + explicit Generator(int m) : maxval(m) {} + K operator()(int i) const { + assert(i <= maxval); + return K(i); + } +}; + +template <> +struct Generator { + int maxval; + explicit Generator(int m) : maxval(m) {} + absl::Time operator()(int i) const { return absl::FromUnixMillis(i); } +}; + +template <> +struct Generator { + int maxval; + explicit Generator(int m) : maxval(m) {} + std::string operator()(int i) const { + char buf[16]; + return GenerateDigits(buf, i, maxval); + } +}; + +template <> +struct Generator { + int maxval; + explicit Generator(int m) : maxval(m) {} + Cord operator()(int i) const { + char buf[16]; + return Cord(GenerateDigits(buf, i, maxval)); + } +}; + +template +struct Generator > { + Generator::type> tgen; + Generator::type> ugen; + + explicit Generator(int m) : tgen(m), ugen(m) {} + std::pair operator()(int i) const { + return std::make_pair(tgen(i), ugen(i)); + } +}; + +// Generate n values for our tests and benchmarks. Value range is [0, maxval]. +inline std::vector GenerateNumbersWithSeed(int n, int maxval, int seed) { + // NOTE: Some tests rely on generated numbers not changing between test runs. + // We use std::minstd_rand0 because it is well-defined, but don't use + // std::uniform_int_distribution because platforms use different algorithms. + std::minstd_rand0 rng(seed); + + std::vector values; + absl::flat_hash_set unique_values; + if (values.size() < n) { + for (int i = values.size(); i < n; i++) { + int value; + do { + value = static_cast(rng()) % (maxval + 1); + } while (!unique_values.insert(value).second); + + values.push_back(value); + } + } + return values; +} + +// Generates n values in the range [0, maxval]. +template +std::vector GenerateValuesWithSeed(int n, int maxval, int seed) { + const std::vector nums = GenerateNumbersWithSeed(n, maxval, seed); + Generator gen(maxval); + std::vector vec; + + vec.reserve(n); + for (int i = 0; i < n; i++) { + vec.push_back(gen(nums[i])); + } + + return vec; +} + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_BTREE_TEST_H_ diff --git a/TMessagesProj/jni/voip/webrtc/absl/container/fixed_array.h b/TMessagesProj/jni/voip/webrtc/absl/container/fixed_array.h index 94385ea7a..839ba0bc1 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/container/fixed_array.h +++ b/TMessagesProj/jni/voip/webrtc/absl/container/fixed_array.h @@ -41,6 +41,7 @@ #include #include "absl/algorithm/algorithm.h" +#include "absl/base/config.h" #include "absl/base/dynamic_annotations.h" #include "absl/base/internal/throw_delegate.h" #include "absl/base/macros.h" @@ -72,11 +73,6 @@ constexpr static auto kFixedArrayUseDefault = static_cast(-1); // uninitialized (e.g. int, int[4], double), and others default-constructed. // This matches the behavior of c-style arrays and `std::array`, but not // `std::vector`. -// -// Note that `FixedArray` does not provide a public allocator; if it requires a -// heap allocation, it will do so with global `::operator new[]()` and -// `::operator delete[]()`, even if T provides class-scope overrides for these -// operators. 
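A minimal usage sketch for the test-only generators above (GenerateDigits, Generator<>, GenerateNumbersWithSeed, GenerateValuesWithSeed). It is hypothetical rather than taken from the patch; the seed and sizes are illustrative.

// Hypothetical usage of the test-only helpers declared in btree_test.h above.
#include <cassert>
#include <string>
#include <vector>

#include "absl/container/btree_test.h"

int main() {
  namespace ci = absl::container_internal;
  // std::minstd_rand0 is fully specified by the C++ standard, so a fixed seed
  // produces the same unique values in [0, maxval] on every platform and run.
  const std::vector<int> nums =
      ci::GenerateNumbersWithSeed(/*n=*/5, /*maxval=*/1000, /*seed=*/123);
  assert(nums == ci::GenerateNumbersWithSeed(5, 1000, 123));

  // Generator<std::string> goes through GenerateDigits, which pads every
  // string to the digit width of `maxval`; lexicographic order of the strings
  // therefore matches the numeric order of the underlying ints.
  const std::vector<std::string> strs =
      ci::GenerateValuesWithSeed<std::string>(/*n=*/5, /*maxval=*/1000,
                                              /*seed=*/123);
  assert(strs.size() == nums.size());
  return 0;
}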
template > class FixedArray { @@ -106,13 +102,13 @@ class FixedArray { public: using allocator_type = typename AllocatorTraits::allocator_type; - using value_type = typename allocator_type::value_type; - using pointer = typename allocator_type::pointer; - using const_pointer = typename allocator_type::const_pointer; - using reference = typename allocator_type::reference; - using const_reference = typename allocator_type::const_reference; - using size_type = typename allocator_type::size_type; - using difference_type = typename allocator_type::difference_type; + using value_type = typename AllocatorTraits::value_type; + using pointer = typename AllocatorTraits::pointer; + using const_pointer = typename AllocatorTraits::const_pointer; + using reference = value_type&; + using const_reference = const value_type&; + using size_type = typename AllocatorTraits::size_type; + using difference_type = typename AllocatorTraits::difference_type; using iterator = pointer; using const_iterator = const_pointer; using reverse_iterator = std::reverse_iterator; @@ -231,8 +227,8 @@ class FixedArray { // FixedArray::at // - // Bounds-checked access. Returns a reference to the ith element of the - // fiexed array, or throws std::out_of_range + // Bounds-checked access. Returns a reference to the ith element of the fixed + // array, or throws std::out_of_range reference at(size_type i) { if (ABSL_PREDICT_FALSE(i >= size())) { base_internal::ThrowStdOutOfRange("FixedArray::at failed bounds check"); @@ -422,10 +418,10 @@ class FixedArray { void AnnotateConstruct(size_type n); void AnnotateDestruct(size_type n); -#ifdef ADDRESS_SANITIZER +#ifdef ABSL_HAVE_ADDRESS_SANITIZER void* RedzoneBegin() { return &redzone_begin_; } void* RedzoneEnd() { return &redzone_end_ + 1; } -#endif // ADDRESS_SANITIZER +#endif // ABSL_HAVE_ADDRESS_SANITIZER private: ABSL_ADDRESS_SANITIZER_REDZONE(redzone_begin_); @@ -503,22 +499,26 @@ constexpr typename FixedArray::size_type template void FixedArray::NonEmptyInlinedStorage::AnnotateConstruct( typename FixedArray::size_type n) { -#ifdef ADDRESS_SANITIZER +#ifdef ABSL_HAVE_ADDRESS_SANITIZER if (!n) return; - ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), RedzoneEnd(), data() + n); - ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(RedzoneBegin(), data(), data(), RedzoneBegin()); -#endif // ADDRESS_SANITIZER + ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), RedzoneEnd(), + data() + n); + ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(RedzoneBegin(), data(), data(), + RedzoneBegin()); +#endif // ABSL_HAVE_ADDRESS_SANITIZER static_cast(n); // Mark used when not in asan mode } template void FixedArray::NonEmptyInlinedStorage::AnnotateDestruct( typename FixedArray::size_type n) { -#ifdef ADDRESS_SANITIZER +#ifdef ABSL_HAVE_ADDRESS_SANITIZER if (!n) return; - ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), data() + n, RedzoneEnd()); - ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(RedzoneBegin(), data(), RedzoneBegin(), data()); -#endif // ADDRESS_SANITIZER + ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), data() + n, + RedzoneEnd()); + ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(RedzoneBegin(), data(), RedzoneBegin(), + data()); +#endif // ABSL_HAVE_ADDRESS_SANITIZER static_cast(n); // Mark used when not in asan mode } ABSL_NAMESPACE_END diff --git a/TMessagesProj/jni/voip/webrtc/absl/container/flat_hash_map.h b/TMessagesProj/jni/voip/webrtc/absl/container/flat_hash_map.h index fcb70d861..83c71029d 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/container/flat_hash_map.h +++ 
b/TMessagesProj/jni/voip/webrtc/absl/container/flat_hash_map.h @@ -36,6 +36,7 @@ #include #include "absl/algorithm/container.h" +#include "absl/base/macros.h" #include "absl/container/internal/container_memory.h" #include "absl/container/internal/hash_function_defaults.h" // IWYU pragma: export #include "absl/container/internal/raw_hash_map.h" // IWYU pragma: export @@ -234,7 +235,8 @@ class flat_hash_map : public absl::container_internal::raw_hash_map< // // size_type erase(const key_type& key): // - // Erases the element with the matching key, if it exists. + // Erases the element with the matching key, if it exists, returning the + // number of elements erased (0 or 1). using Base::erase; // flat_hash_map::insert() @@ -383,6 +385,11 @@ class flat_hash_map : public absl::container_internal::raw_hash_map< // key value and returns a node handle owning that extracted data. If the // `flat_hash_map` does not contain an element with a matching key, this // function returns an empty node handle. + // + // NOTE: when compiled in an earlier version of C++ than C++17, + // `node_type::key()` returns a const reference to the key instead of a + // mutable reference. We cannot safely return a mutable reference without + // std::launder (which is not available before C++17). using Base::extract; // flat_hash_map::merge() @@ -535,10 +542,12 @@ class flat_hash_map : public absl::container_internal::raw_hash_map< // erase_if(flat_hash_map<>, Pred) // // Erases all elements that satisfy the predicate `pred` from the container `c`. +// Returns the number of erased elements. template -void erase_if(flat_hash_map& c, Predicate pred) { - container_internal::EraseIf(pred, &c); +typename flat_hash_map::size_type erase_if( + flat_hash_map& c, Predicate pred) { + return container_internal::EraseIf(pred, &c); } namespace container_internal { diff --git a/TMessagesProj/jni/voip/webrtc/absl/container/flat_hash_set.h b/TMessagesProj/jni/voip/webrtc/absl/container/flat_hash_set.h index 94be6e3d1..0fb2ae6fe 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/container/flat_hash_set.h +++ b/TMessagesProj/jni/voip/webrtc/absl/container/flat_hash_set.h @@ -67,7 +67,7 @@ struct FlatHashSetPolicy; // // By default, `flat_hash_set` uses the `absl::Hash` hashing framework. All // fundamental and Abseil types that support the `absl::Hash` framework have a -// compatible equality operator for comparing insertions into `flat_hash_map`. +// compatible equality operator for comparing insertions into `flat_hash_set`. // If your type is not yet supported by the `absl::Hash` framework, see // absl/hash/hash.h for information on extending Abseil hashing to user-defined // types. @@ -106,7 +106,7 @@ class flat_hash_set public: // Constructors and Assignment Operators // - // A flat_hash_set supports the same overload set as `std::unordered_map` + // A flat_hash_set supports the same overload set as `std::unordered_set` // for construction and assignment: // // * Default constructor @@ -173,7 +173,7 @@ class flat_hash_set // available within the `flat_hash_set`. // // NOTE: this member function is particular to `absl::flat_hash_set` and is - // not provided in the `std::unordered_map` API. + // not provided in the `std::unordered_set` API. using Base::capacity; // flat_hash_set::empty() @@ -227,7 +227,8 @@ class flat_hash_set // // size_type erase(const key_type& key): // - // Erases the element with the matching key, if it exists. 
+ // Erases the element with the matching key, if it exists, returning the + // number of elements erased (0 or 1). using Base::erase; // flat_hash_set::insert() @@ -323,7 +324,7 @@ class flat_hash_set // flat_hash_set::merge() // - // Extracts elements from a given `source` flat hash map into this + // Extracts elements from a given `source` flat hash set into this // `flat_hash_set`. If the destination `flat_hash_set` already contains an // element with an equivalent key, that element is not extracted. using Base::merge; @@ -331,7 +332,7 @@ class flat_hash_set // flat_hash_set::swap(flat_hash_set& other) // // Exchanges the contents of this `flat_hash_set` with those of the `other` - // flat hash map, avoiding invocation of any move, copy, or swap operations on + // flat hash set, avoiding invocation of any move, copy, or swap operations on // individual elements. // // All iterators and references on the `flat_hash_set` remain valid, excepting @@ -339,7 +340,7 @@ class flat_hash_set // // `swap()` requires that the flat hash set's hashing and key equivalence // functions be Swappable, and are exchaged using unqualified calls to - // non-member `swap()`. If the map's allocator has + // non-member `swap()`. If the set's allocator has // `std::allocator_traits::propagate_on_container_swap::value` // set to `true`, the allocators are also exchanged using an unqualified call // to non-member `swap()`; otherwise, the allocators are not swapped. @@ -394,14 +395,14 @@ class flat_hash_set // flat_hash_set::bucket_count() // // Returns the number of "buckets" within the `flat_hash_set`. Note that - // because a flat hash map contains all elements within its internal storage, + // because a flat hash set contains all elements within its internal storage, // this value simply equals the current capacity of the `flat_hash_set`. using Base::bucket_count; // flat_hash_set::load_factor() // // Returns the current load factor of the `flat_hash_set` (the average number - // of slots occupied with a value within the hash map). + // of slots occupied with a value within the hash set). using Base::load_factor; // flat_hash_set::max_load_factor() @@ -442,9 +443,11 @@ class flat_hash_set // erase_if(flat_hash_set<>, Pred) // // Erases all elements that satisfy the predicate `pred` from the container `c`. +// Returns the number of erased elements. template -void erase_if(flat_hash_set& c, Predicate pred) { - container_internal::EraseIf(pred, &c); +typename flat_hash_set::size_type erase_if( + flat_hash_set& c, Predicate pred) { + return container_internal::EraseIf(pred, &c); } namespace container_internal { diff --git a/TMessagesProj/jni/voip/webrtc/absl/container/inlined_vector.h b/TMessagesProj/jni/voip/webrtc/absl/container/inlined_vector.h index 5f6f6154b..711b29c18 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/container/inlined_vector.h +++ b/TMessagesProj/jni/voip/webrtc/absl/container/inlined_vector.h @@ -36,7 +36,6 @@ #define ABSL_CONTAINER_INLINED_VECTOR_H_ #include -#include #include #include #include @@ -64,7 +63,7 @@ ABSL_NAMESPACE_BEGIN // `std::vector` for use cases where the vector's size is sufficiently small // that it can be inlined. If the inlined vector does grow beyond its estimated // capacity, it will trigger an initial allocation on the heap, and will behave -// as a `std:vector`. The API of the `absl::InlinedVector` within this file is +// as a `std::vector`. 
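Both erase_if overloads above (for flat_hash_map and flat_hash_set) now return the number of erased elements instead of void. A minimal sketch of the new contract, hypothetical and with illustrative values:

// Hypothetical sketch of the updated erase_if contract (values illustrative).
#include <cstddef>
#include <cstdio>
#include <utility>

#include "absl/container/flat_hash_map.h"

int main() {
  absl::flat_hash_map<int, int> m = {{1, 10}, {2, 20}, {3, 30}, {4, 40}};
  // Erase every entry with an odd key; the count of erased entries is now
  // returned directly instead of having to be derived from size() deltas.
  const std::size_t erased =
      absl::erase_if(m, [](const std::pair<const int, int>& kv) {
        return kv.first % 2 != 0;
      });
  std::printf("erased=%zu remaining=%zu\n", erased, m.size());  // 2 and 2
  return 0;
}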
The API of the `absl::InlinedVector` within this file is // designed to cover the same API footprint as covered by `std::vector`. template > class InlinedVector { @@ -72,37 +71,43 @@ class InlinedVector { using Storage = inlined_vector_internal::Storage; - using AllocatorTraits = typename Storage::AllocatorTraits; - using RValueReference = typename Storage::RValueReference; - using MoveIterator = typename Storage::MoveIterator; - using IsMemcpyOk = typename Storage::IsMemcpyOk; + template + using AllocatorTraits = inlined_vector_internal::AllocatorTraits; + template + using MoveIterator = inlined_vector_internal::MoveIterator; + template + using IsMemcpyOk = inlined_vector_internal::IsMemcpyOk; - template + template using IteratorValueAdapter = - typename Storage::template IteratorValueAdapter; - using CopyValueAdapter = typename Storage::CopyValueAdapter; - using DefaultValueAdapter = typename Storage::DefaultValueAdapter; + inlined_vector_internal::IteratorValueAdapter; + template + using CopyValueAdapter = inlined_vector_internal::CopyValueAdapter; + template + using DefaultValueAdapter = + inlined_vector_internal::DefaultValueAdapter; template using EnableIfAtLeastForwardIterator = absl::enable_if_t< - inlined_vector_internal::IsAtLeastForwardIterator::value>; + inlined_vector_internal::IsAtLeastForwardIterator::value, int>; template using DisableIfAtLeastForwardIterator = absl::enable_if_t< - !inlined_vector_internal::IsAtLeastForwardIterator::value>; + !inlined_vector_internal::IsAtLeastForwardIterator::value, int>; public: - using allocator_type = typename Storage::allocator_type; - using value_type = typename Storage::value_type; - using pointer = typename Storage::pointer; - using const_pointer = typename Storage::const_pointer; - using size_type = typename Storage::size_type; - using difference_type = typename Storage::difference_type; - using reference = typename Storage::reference; - using const_reference = typename Storage::const_reference; - using iterator = typename Storage::iterator; - using const_iterator = typename Storage::const_iterator; - using reverse_iterator = typename Storage::reverse_iterator; - using const_reverse_iterator = typename Storage::const_reverse_iterator; + using allocator_type = A; + using value_type = inlined_vector_internal::ValueType; + using pointer = inlined_vector_internal::Pointer; + using const_pointer = inlined_vector_internal::ConstPointer; + using size_type = inlined_vector_internal::SizeType; + using difference_type = inlined_vector_internal::DifferenceType; + using reference = inlined_vector_internal::Reference; + using const_reference = inlined_vector_internal::ConstReference; + using iterator = inlined_vector_internal::Iterator; + using const_iterator = inlined_vector_internal::ConstIterator; + using reverse_iterator = inlined_vector_internal::ReverseIterator; + using const_reverse_iterator = + inlined_vector_internal::ConstReverseIterator; // --------------------------------------------------------------------------- // InlinedVector Constructors and Destructor @@ -111,28 +116,28 @@ class InlinedVector { // Creates an empty inlined vector with a value-initialized allocator. InlinedVector() noexcept(noexcept(allocator_type())) : storage_() {} - // Creates an empty inlined vector with a copy of `alloc`. - explicit InlinedVector(const allocator_type& alloc) noexcept - : storage_(alloc) {} + // Creates an empty inlined vector with a copy of `allocator`. 
+ explicit InlinedVector(const allocator_type& allocator) noexcept + : storage_(allocator) {} // Creates an inlined vector with `n` copies of `value_type()`. explicit InlinedVector(size_type n, - const allocator_type& alloc = allocator_type()) - : storage_(alloc) { - storage_.Initialize(DefaultValueAdapter(), n); + const allocator_type& allocator = allocator_type()) + : storage_(allocator) { + storage_.Initialize(DefaultValueAdapter(), n); } // Creates an inlined vector with `n` copies of `v`. InlinedVector(size_type n, const_reference v, - const allocator_type& alloc = allocator_type()) - : storage_(alloc) { - storage_.Initialize(CopyValueAdapter(v), n); + const allocator_type& allocator = allocator_type()) + : storage_(allocator) { + storage_.Initialize(CopyValueAdapter(std::addressof(v)), n); } // Creates an inlined vector with copies of the elements of `list`. InlinedVector(std::initializer_list list, - const allocator_type& alloc = allocator_type()) - : InlinedVector(list.begin(), list.end(), alloc) {} + const allocator_type& allocator = allocator_type()) + : InlinedVector(list.begin(), list.end(), allocator) {} // Creates an inlined vector with elements constructed from the provided // forward iterator range [`first`, `last`). @@ -141,37 +146,40 @@ class InlinedVector { // this constructor with two integral arguments and a call to the above // `InlinedVector(size_type, const_reference)` constructor. template * = nullptr> + EnableIfAtLeastForwardIterator = 0> InlinedVector(ForwardIterator first, ForwardIterator last, - const allocator_type& alloc = allocator_type()) - : storage_(alloc) { - storage_.Initialize(IteratorValueAdapter(first), - std::distance(first, last)); + const allocator_type& allocator = allocator_type()) + : storage_(allocator) { + storage_.Initialize(IteratorValueAdapter(first), + static_cast(std::distance(first, last))); } // Creates an inlined vector with elements constructed from the provided input // iterator range [`first`, `last`). template * = nullptr> + DisableIfAtLeastForwardIterator = 0> InlinedVector(InputIterator first, InputIterator last, - const allocator_type& alloc = allocator_type()) - : storage_(alloc) { + const allocator_type& allocator = allocator_type()) + : storage_(allocator) { std::copy(first, last, std::back_inserter(*this)); } // Creates an inlined vector by copying the contents of `other` using // `other`'s allocator. InlinedVector(const InlinedVector& other) - : InlinedVector(other, *other.storage_.GetAllocPtr()) {} + : InlinedVector(other, other.storage_.GetAllocator()) {} - // Creates an inlined vector by copying the contents of `other` using `alloc`. - InlinedVector(const InlinedVector& other, const allocator_type& alloc) - : storage_(alloc) { - if (IsMemcpyOk::value && !other.storage_.GetIsAllocated()) { + // Creates an inlined vector by copying the contents of `other` using the + // provided `allocator`. + InlinedVector(const InlinedVector& other, const allocator_type& allocator) + : storage_(allocator) { + if (other.empty()) { + // Empty; nothing to do. + } else if (IsMemcpyOk::value && !other.storage_.GetIsAllocated()) { + // Memcpy-able and do not need allocation. 
storage_.MemcpyFrom(other.storage_); } else { - storage_.Initialize(IteratorValueAdapter(other.data()), - other.size()); + storage_.InitFrom(other.storage_); } } @@ -192,23 +200,23 @@ class InlinedVector { InlinedVector(InlinedVector&& other) noexcept( absl::allocator_is_nothrow::value || std::is_nothrow_move_constructible::value) - : storage_(*other.storage_.GetAllocPtr()) { - if (IsMemcpyOk::value) { + : storage_(other.storage_.GetAllocator()) { + if (IsMemcpyOk::value) { storage_.MemcpyFrom(other.storage_); other.storage_.SetInlinedSize(0); } else if (other.storage_.GetIsAllocated()) { - storage_.SetAllocatedData(other.storage_.GetAllocatedData(), - other.storage_.GetAllocatedCapacity()); + storage_.SetAllocation({other.storage_.GetAllocatedData(), + other.storage_.GetAllocatedCapacity()}); storage_.SetAllocatedSize(other.storage_.GetSize()); other.storage_.SetInlinedSize(0); } else { - IteratorValueAdapter other_values( - MoveIterator(other.storage_.GetInlinedData())); + IteratorValueAdapter> other_values( + MoveIterator(other.storage_.GetInlinedData())); - inlined_vector_internal::ConstructElements( - storage_.GetAllocPtr(), storage_.GetInlinedData(), &other_values, + inlined_vector_internal::ConstructElements( + storage_.GetAllocator(), storage_.GetInlinedData(), other_values, other.storage_.GetSize()); storage_.SetInlinedSize(other.storage_.GetSize()); @@ -216,30 +224,32 @@ class InlinedVector { } // Creates an inlined vector by moving in the contents of `other` with a copy - // of `alloc`. + // of `allocator`. // - // NOTE: if `other`'s allocator is not equal to `alloc`, even if `other` + // NOTE: if `other`'s allocator is not equal to `allocator`, even if `other` // contains allocated memory, this move constructor will still allocate. Since // allocation is performed, this constructor can only be `noexcept` if the // specified allocator is also `noexcept`. - InlinedVector(InlinedVector&& other, const allocator_type& alloc) noexcept( - absl::allocator_is_nothrow::value) - : storage_(alloc) { - if (IsMemcpyOk::value) { + InlinedVector( + InlinedVector&& other, + const allocator_type& + allocator) noexcept(absl::allocator_is_nothrow::value) + : storage_(allocator) { + if (IsMemcpyOk::value) { storage_.MemcpyFrom(other.storage_); other.storage_.SetInlinedSize(0); - } else if ((*storage_.GetAllocPtr() == *other.storage_.GetAllocPtr()) && + } else if ((storage_.GetAllocator() == other.storage_.GetAllocator()) && other.storage_.GetIsAllocated()) { - storage_.SetAllocatedData(other.storage_.GetAllocatedData(), - other.storage_.GetAllocatedCapacity()); + storage_.SetAllocation({other.storage_.GetAllocatedData(), + other.storage_.GetAllocatedCapacity()}); storage_.SetAllocatedSize(other.storage_.GetSize()); other.storage_.SetInlinedSize(0); } else { - storage_.Initialize( - IteratorValueAdapter(MoveIterator(other.data())), - other.size()); + storage_.Initialize(IteratorValueAdapter>( + MoveIterator(other.data())), + other.size()); } } @@ -351,14 +361,14 @@ class InlinedVector { // Returns a `reference` to the first element of the inlined vector. reference front() { ABSL_HARDENING_ASSERT(!empty()); - return at(0); + return data()[0]; } // Overload of `InlinedVector::front()` that returns a `const_reference` to // the first element of the inlined vector. const_reference front() const { ABSL_HARDENING_ASSERT(!empty()); - return at(0); + return data()[0]; } // `InlinedVector::back()` @@ -366,14 +376,14 @@ class InlinedVector { // Returns a `reference` to the last element of the inlined vector. 
reference back() { ABSL_HARDENING_ASSERT(!empty()); - return at(size() - 1); + return data()[size() - 1]; } // Overload of `InlinedVector::back()` that returns a `const_reference` to the // last element of the inlined vector. const_reference back() const { ABSL_HARDENING_ASSERT(!empty()); - return at(size() - 1); + return data()[size() - 1]; } // `InlinedVector::begin()` @@ -440,7 +450,7 @@ class InlinedVector { // `InlinedVector::get_allocator()` // // Returns a copy of the inlined vector's allocator. - allocator_type get_allocator() const { return *storage_.GetAllocPtr(); } + allocator_type get_allocator() const { return storage_.GetAllocator(); } // --------------------------------------------------------------------------- // InlinedVector Member Mutators @@ -474,16 +484,16 @@ class InlinedVector { // unspecified state. InlinedVector& operator=(InlinedVector&& other) { if (ABSL_PREDICT_TRUE(this != std::addressof(other))) { - if (IsMemcpyOk::value || other.storage_.GetIsAllocated()) { - inlined_vector_internal::DestroyElements(storage_.GetAllocPtr(), data(), - size()); + if (IsMemcpyOk::value || other.storage_.GetIsAllocated()) { + inlined_vector_internal::DestroyAdapter::DestroyElements( + storage_.GetAllocator(), data(), size()); storage_.DeallocateIfAllocated(); storage_.MemcpyFrom(other.storage_); other.storage_.SetInlinedSize(0); } else { - storage_.Assign(IteratorValueAdapter( - MoveIterator(other.storage_.GetInlinedData())), + storage_.Assign(IteratorValueAdapter>( + MoveIterator(other.storage_.GetInlinedData())), other.size()); } } @@ -495,7 +505,7 @@ class InlinedVector { // // Replaces the contents of the inlined vector with `n` copies of `v`. void assign(size_type n, const_reference v) { - storage_.Assign(CopyValueAdapter(v), n); + storage_.Assign(CopyValueAdapter(std::addressof(v)), n); } // Overload of `InlinedVector::assign(...)` that replaces the contents of the @@ -509,10 +519,10 @@ class InlinedVector { // // NOTE: this overload is for iterators that are "forward" category or better. template * = nullptr> + EnableIfAtLeastForwardIterator = 0> void assign(ForwardIterator first, ForwardIterator last) { - storage_.Assign(IteratorValueAdapter(first), - std::distance(first, last)); + storage_.Assign(IteratorValueAdapter(first), + static_cast(std::distance(first, last))); } // Overload of `InlinedVector::assign(...)` to replace the contents of the @@ -520,11 +530,11 @@ class InlinedVector { // // NOTE: this overload is for iterators that are "input" category. template * = nullptr> + DisableIfAtLeastForwardIterator = 0> void assign(InputIterator first, InputIterator last) { size_type i = 0; for (; i < size() && first != last; ++i, static_cast(++first)) { - at(i) = *first; + data()[i] = *first; } erase(data() + i, data() + size()); @@ -535,9 +545,12 @@ class InlinedVector { // // Resizes the inlined vector to contain `n` elements. // - // NOTE: if `n` is smaller than `size()`, extra elements are destroyed. If `n` + // NOTE: If `n` is smaller than `size()`, extra elements are destroyed. If `n` // is larger than `size()`, new elements are value-initialized. - void resize(size_type n) { storage_.Resize(DefaultValueAdapter(), n); } + void resize(size_type n) { + ABSL_HARDENING_ASSERT(n <= max_size()); + storage_.Resize(DefaultValueAdapter(), n); + } // Overload of `InlinedVector::resize(...)` that resizes the inlined vector to // contain `n` elements. @@ -545,7 +558,8 @@ class InlinedVector { // NOTE: if `n` is smaller than `size()`, extra elements are destroyed. 
If `n` // is larger than `size()`, new elements are copied-constructed from `v`. void resize(size_type n, const_reference v) { - storage_.Resize(CopyValueAdapter(v), n); + ABSL_HARDENING_ASSERT(n <= max_size()); + storage_.Resize(CopyValueAdapter(std::addressof(v)), n); } // `InlinedVector::insert(...)` @@ -558,7 +572,7 @@ class InlinedVector { // Overload of `InlinedVector::insert(...)` that inserts `v` at `pos` using // move semantics, returning an `iterator` to the newly inserted element. - iterator insert(const_iterator pos, RValueReference v) { + iterator insert(const_iterator pos, value_type&& v) { return emplace(pos, std::move(v)); } @@ -571,7 +585,8 @@ class InlinedVector { if (ABSL_PREDICT_TRUE(n != 0)) { value_type dealias = v; - return storage_.Insert(pos, CopyValueAdapter(dealias), n); + return storage_.Insert(pos, CopyValueAdapter(std::addressof(dealias)), + n); } else { return const_cast(pos); } @@ -590,14 +605,15 @@ class InlinedVector { // // NOTE: this overload is for iterators that are "forward" category or better. template * = nullptr> + EnableIfAtLeastForwardIterator = 0> iterator insert(const_iterator pos, ForwardIterator first, ForwardIterator last) { ABSL_HARDENING_ASSERT(pos >= begin()); ABSL_HARDENING_ASSERT(pos <= end()); if (ABSL_PREDICT_TRUE(first != last)) { - return storage_.Insert(pos, IteratorValueAdapter(first), + return storage_.Insert(pos, + IteratorValueAdapter(first), std::distance(first, last)); } else { return const_cast(pos); @@ -610,7 +626,7 @@ class InlinedVector { // // NOTE: this overload is for iterators that are "input" category. template * = nullptr> + DisableIfAtLeastForwardIterator = 0> iterator insert(const_iterator pos, InputIterator first, InputIterator last) { ABSL_HARDENING_ASSERT(pos >= begin()); ABSL_HARDENING_ASSERT(pos <= end()); @@ -634,8 +650,8 @@ class InlinedVector { value_type dealias(std::forward(args)...); return storage_.Insert(pos, - IteratorValueAdapter( - MoveIterator(std::addressof(dealias))), + IteratorValueAdapter>( + MoveIterator(std::addressof(dealias))), 1); } @@ -655,7 +671,7 @@ class InlinedVector { // Overload of `InlinedVector::push_back(...)` for inserting `v` at `end()` // using move semantics. - void push_back(RValueReference v) { + void push_back(value_type&& v) { static_cast(emplace_back(std::move(v))); } @@ -665,7 +681,7 @@ class InlinedVector { void pop_back() noexcept { ABSL_HARDENING_ASSERT(!empty()); - AllocatorTraits::destroy(*storage_.GetAllocPtr(), data() + (size() - 1)); + AllocatorTraits::destroy(storage_.GetAllocator(), data() + (size() - 1)); storage_.SubtractSize(1); } @@ -704,8 +720,8 @@ class InlinedVector { // Destroys all elements in the inlined vector, setting the size to `0` and // deallocating any held memory. void clear() noexcept { - inlined_vector_internal::DestroyElements(storage_.GetAllocPtr(), data(), - size()); + inlined_vector_internal::DestroyAdapter::DestroyElements( + storage_.GetAllocator(), data(), size()); storage_.DeallocateIfAllocated(); storage_.SetInlinedSize(0); @@ -718,15 +734,12 @@ class InlinedVector { // `InlinedVector::shrink_to_fit()` // - // Reduces memory usage by freeing unused memory. After being called, calls to - // `capacity()` will be equal to `max(N, size())`. + // Attempts to reduce memory usage by moving elements to (or keeping elements + // in) the smallest available buffer sufficient for containing `size()` + // elements. 
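A short, hypothetical sketch of the inline-versus-heap behaviour that the shrink_to_fit() wording above describes; the inline capacity of 4 is an arbitrary illustrative choice.

// Hypothetical sketch of InlinedVector's small-buffer behaviour.
#include <cstdio>

#include "absl/container/inlined_vector.h"

int main() {
  absl::InlinedVector<int, 4> v;
  // The first 4 elements live inside the object itself; capacity() reports at
  // least the inline capacity without any heap allocation having happened.
  for (int i = 0; i < 4; ++i) v.push_back(i);
  std::printf("capacity=%zu\n", v.capacity());  // >= 4

  // One more element exceeds the inline buffer, triggers a heap allocation,
  // and from then on the container behaves like std::vector.
  v.push_back(4);
  std::printf("capacity=%zu\n", v.capacity());  // > 4

  // Per the comment above, shrink_to_fit() may move the elements back into
  // the inline buffer once size() fits there again.
  v.pop_back();
  v.shrink_to_fit();
  std::printf("size=%zu capacity=%zu\n", v.size(), v.capacity());
  return 0;
}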
// - // If `size() <= N` and the inlined vector contains allocated memory, the - // elements will all be moved to the inlined space and the allocated memory - // will be deallocated. - // - // If `size() > N` and `size() < capacity()`, the elements will be moved to a - // smaller allocation. + // If `size()` is sufficiently small, the elements will be moved into (or kept + // in) the inlined space. void shrink_to_fit() { if (storage_.GetIsAllocated()) { storage_.ShrinkToFit(); diff --git a/TMessagesProj/jni/voip/webrtc/absl/container/inlined_vector_benchmark.cc b/TMessagesProj/jni/voip/webrtc/absl/container/inlined_vector_benchmark.cc index b8dafe932..e256fad60 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/container/inlined_vector_benchmark.cc +++ b/TMessagesProj/jni/voip/webrtc/absl/container/inlined_vector_benchmark.cc @@ -534,6 +534,28 @@ void BM_ConstructFromMove(benchmark::State& state) { ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromMove, TrivialType); ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromMove, NontrivialType); +// Measure cost of copy-constructor+destructor. +void BM_CopyTrivial(benchmark::State& state) { + const int n = state.range(0); + InlVec src(n); + for (auto s : state) { + InlVec copy(src); + benchmark::DoNotOptimize(copy); + } +} +BENCHMARK(BM_CopyTrivial)->Arg(0)->Arg(1)->Arg(kLargeSize); + +// Measure cost of copy-constructor+destructor. +void BM_CopyNonTrivial(benchmark::State& state) { + const int n = state.range(0); + InlVec> src(n); + for (auto s : state) { + InlVec> copy(src); + benchmark::DoNotOptimize(copy); + } +} +BENCHMARK(BM_CopyNonTrivial)->Arg(0)->Arg(1)->Arg(kLargeSize); + template void BM_AssignSizeRef(benchmark::State& state) { auto size = ToSize; diff --git a/TMessagesProj/jni/voip/webrtc/absl/container/internal/btree.h b/TMessagesProj/jni/voip/webrtc/absl/container/internal/btree.h index 4504e9ce6..6c10b00f4 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/container/internal/btree.h +++ b/TMessagesProj/jni/voip/webrtc/absl/container/internal/btree.h @@ -58,6 +58,7 @@ #include #include +#include "absl/base/internal/raw_logging.h" #include "absl/base/macros.h" #include "absl/container/internal/common.h" #include "absl/container/internal/compressed_tuple.h" @@ -74,12 +75,24 @@ namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { +#ifdef ABSL_BTREE_ENABLE_GENERATIONS +#error ABSL_BTREE_ENABLE_GENERATIONS cannot be directly set +#elif defined(ABSL_HAVE_ADDRESS_SANITIZER) || \ + defined(ABSL_HAVE_MEMORY_SANITIZER) +// When compiled in sanitizer mode, we add generation integers to the nodes and +// iterators. When iterators are used, we validate that the container has not +// been mutated since the iterator was constructed. +#define ABSL_BTREE_ENABLE_GENERATIONS +#endif + +template +using compare_result_t = absl::result_of_t; + // A helper class that indicates if the Compare parameter is a key-compare-to // comparator. template using btree_is_key_compare_to = - std::is_convertible, - absl::weak_ordering>; + std::is_convertible, absl::weak_ordering>; struct StringBtreeDefaultLess { using is_transparent = void; @@ -88,7 +101,12 @@ struct StringBtreeDefaultLess { // Compatibility constructor. StringBtreeDefaultLess(std::less) {} // NOLINT - StringBtreeDefaultLess(std::less) {} // NOLINT + StringBtreeDefaultLess(std::less) {} // NOLINT + + // Allow converting to std::less for use in key_comp()/value_comp(). 
+ explicit operator std::less() const { return {}; } + explicit operator std::less() const { return {}; } + explicit operator std::less() const { return {}; } absl::weak_ordering operator()(absl::string_view lhs, absl::string_view rhs) const { @@ -115,7 +133,12 @@ struct StringBtreeDefaultGreater { StringBtreeDefaultGreater() = default; StringBtreeDefaultGreater(std::greater) {} // NOLINT - StringBtreeDefaultGreater(std::greater) {} // NOLINT + StringBtreeDefaultGreater(std::greater) {} // NOLINT + + // Allow converting to std::greater for use in key_comp()/value_comp(). + explicit operator std::greater() const { return {}; } + explicit operator std::greater() const { return {}; } + explicit operator std::greater() const { return {}; } absl::weak_ordering operator()(absl::string_view lhs, absl::string_view rhs) const { @@ -136,71 +159,222 @@ struct StringBtreeDefaultGreater { } }; -// A helper class to convert a boolean comparison into a three-way "compare-to" -// comparison that returns a negative value to indicate less-than, zero to -// indicate equality and a positive value to indicate greater-than. This helper -// class is specialized for less, greater, -// less, greater, less, and -// greater. -// -// key_compare_to_adapter is provided so that btree users -// automatically get the more efficient compare-to code when using common -// google string types with common comparison functors. -// These string-like specializations also turn on heterogeneous lookup by -// default. +// See below comments for checked_compare. +template ::value> +struct checked_compare_base : Compare { + using Compare::Compare; + explicit checked_compare_base(Compare c) : Compare(std::move(c)) {} + const Compare &comp() const { return *this; } +}; template -struct key_compare_to_adapter { - using type = Compare; +struct checked_compare_base { + explicit checked_compare_base(Compare c) : compare(std::move(c)) {} + const Compare &comp() const { return compare; } + Compare compare; +}; + +// A mechanism for opting out of checked_compare for use only in btree_test.cc. +struct BtreeTestOnlyCheckedCompareOptOutBase {}; + +// A helper class to adapt the specified comparator for two use cases: +// (1) When using common Abseil string types with common comparison functors, +// convert a boolean comparison into a three-way comparison that returns an +// `absl::weak_ordering`. This helper class is specialized for +// less, greater, less, +// greater, less, and greater. +// (2) Adapt the comparator to diagnose cases of non-strict-weak-ordering (see +// https://en.cppreference.com/w/cpp/named_req/Compare) in debug mode. Whenever +// a comparison is made, we will make assertions to verify that the comparator +// is valid. +template +struct key_compare_adapter { + // Inherit from checked_compare_base to support function pointers and also + // keep empty-base-optimization (EBO) support for classes. + // Note: we can't use CompressedTuple here because that would interfere + // with the EBO for `btree::root_`. `btree::root_` is itself a CompressedTuple + // and nested `CompressedTuple`s don't support EBO. + // TODO(b/214288561): use CompressedTuple instead once it supports EBO for + // nested `CompressedTuple`s. + struct checked_compare : checked_compare_base { + private: + using Base = typename checked_compare::checked_compare_base; + using Base::comp; + + // If possible, returns whether `t` is equivalent to itself. We can only do + // this for `Key`s because we can't be sure that it's safe to call + // `comp()(k, k)` otherwise. 
Even if SFINAE allows it, there could be a + // compilation failure inside the implementation of the comparison operator. + bool is_self_equivalent(const Key &k) const { + // Note: this works for both boolean and three-way comparators. + return comp()(k, k) == 0; + } + // If we can't compare `t` with itself, returns true unconditionally. + template + bool is_self_equivalent(const T &) const { + return true; + } + + public: + using Base::Base; + checked_compare(Compare comp) : Base(std::move(comp)) {} // NOLINT + + // Allow converting to Compare for use in key_comp()/value_comp(). + explicit operator Compare() const { return comp(); } + + template >::value, + int> = 0> + bool operator()(const T &lhs, const U &rhs) const { + // NOTE: if any of these assertions fail, then the comparator does not + // establish a strict-weak-ordering (see + // https://en.cppreference.com/w/cpp/named_req/Compare). + assert(is_self_equivalent(lhs)); + assert(is_self_equivalent(rhs)); + const bool lhs_comp_rhs = comp()(lhs, rhs); + assert(!lhs_comp_rhs || !comp()(rhs, lhs)); + return lhs_comp_rhs; + } + + template < + typename T, typename U, + absl::enable_if_t, + absl::weak_ordering>::value, + int> = 0> + absl::weak_ordering operator()(const T &lhs, const U &rhs) const { + // NOTE: if any of these assertions fail, then the comparator does not + // establish a strict-weak-ordering (see + // https://en.cppreference.com/w/cpp/named_req/Compare). + assert(is_self_equivalent(lhs)); + assert(is_self_equivalent(rhs)); + const absl::weak_ordering lhs_comp_rhs = comp()(lhs, rhs); +#ifndef NDEBUG + const absl::weak_ordering rhs_comp_lhs = comp()(rhs, lhs); + if (lhs_comp_rhs > 0) { + assert(rhs_comp_lhs < 0 && "lhs_comp_rhs > 0 -> rhs_comp_lhs < 0"); + } else if (lhs_comp_rhs == 0) { + assert(rhs_comp_lhs == 0 && "lhs_comp_rhs == 0 -> rhs_comp_lhs == 0"); + } else { + assert(rhs_comp_lhs > 0 && "lhs_comp_rhs < 0 -> rhs_comp_lhs > 0"); + } +#endif + return lhs_comp_rhs; + } + }; + using type = absl::conditional_t< + std::is_base_of::value, + Compare, checked_compare>; }; template <> -struct key_compare_to_adapter> { +struct key_compare_adapter, std::string> { using type = StringBtreeDefaultLess; }; template <> -struct key_compare_to_adapter> { +struct key_compare_adapter, std::string> { using type = StringBtreeDefaultGreater; }; template <> -struct key_compare_to_adapter> { +struct key_compare_adapter, absl::string_view> { using type = StringBtreeDefaultLess; }; template <> -struct key_compare_to_adapter> { +struct key_compare_adapter, absl::string_view> { using type = StringBtreeDefaultGreater; }; template <> -struct key_compare_to_adapter> { +struct key_compare_adapter, absl::Cord> { using type = StringBtreeDefaultLess; }; template <> -struct key_compare_to_adapter> { +struct key_compare_adapter, absl::Cord> { using type = StringBtreeDefaultGreater; }; +// Detects an 'absl_btree_prefer_linear_node_search' member. This is +// a protocol used as an opt-in or opt-out of linear search. +// +// For example, this would be useful for key types that wrap an integer +// and define their own cheap operator<(). For example: +// +// class K { +// public: +// using absl_btree_prefer_linear_node_search = std::true_type; +// ... +// private: +// friend bool operator<(K a, K b) { return a.k_ < b.k_; } +// int k_; +// }; +// +// btree_map m; // Uses linear search +// +// If T has the preference tag, then it has a preference. +// Btree will use the tag's truth value. 
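The comment above documents the opt-in tag on a key type; per the use_linear_search selection further below, a comparator can carry the same tag, and its preference is consulted first. A minimal, hypothetical sketch:

// Hypothetical sketch (names illustrative): opting a custom comparator into
// linear node search via the absl_btree_prefer_linear_node_search tag.
#include <type_traits>

#include "absl/container/btree_set.h"

struct CheapIntCmp {
  // Detected by has_linear_node_search_preference; without the tag, a
  // non-std::less comparator over int would get binary search within nodes.
  using absl_btree_prefer_linear_node_search = std::true_type;
  bool operator()(int a, int b) const { return a < b; }
};

int main() {
  absl::btree_set<int, CheapIntCmp> s;  // nodes are now scanned linearly
  s.insert({3, 1, 2});
  return s.contains(2) ? 0 : 1;
}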
+template +struct has_linear_node_search_preference : std::false_type {}; +template +struct prefers_linear_node_search : std::false_type {}; +template +struct has_linear_node_search_preference< + T, absl::void_t> + : std::true_type {}; +template +struct prefers_linear_node_search< + T, absl::void_t> + : T::absl_btree_prefer_linear_node_search {}; + +template +constexpr bool compare_has_valid_result_type() { + using compare_result_type = compare_result_t; + return std::is_same::value || + std::is_convertible::value; +} + template struct common_params { + using original_key_compare = Compare; + // If Compare is a common comparator for a string-like type, then we adapt it // to use heterogeneous lookup and to be a key-compare-to comparator. - using key_compare = typename key_compare_to_adapter::type; + // We also adapt the comparator to diagnose invalid comparators in debug mode. + // We disable this when `Compare` is invalid in a way that will cause + // adaptation to fail (having invalid return type) so that we can give a + // better compilation failure in static_assert_validation. If we don't do + // this, then there will be cascading compilation failures that are confusing + // for users. + using key_compare = + absl::conditional_t(), + Compare, + typename key_compare_adapter::type>; + + static constexpr bool kIsKeyCompareStringAdapted = + std::is_same::value || + std::is_same::value; + static constexpr bool kIsKeyCompareTransparent = + IsTransparent::value || + kIsKeyCompareStringAdapted; + static constexpr bool kEnableGenerations = +#ifdef ABSL_BTREE_ENABLE_GENERATIONS + true; +#else + false; +#endif + // A type which indicates if we have a key-compare-to functor or a plain old // key-compare functor. using is_key_compare_to = btree_is_key_compare_to; using allocator_type = Alloc; using key_type = Key; - using size_type = std::make_signed::type; + using size_type = size_t; using difference_type = ptrdiff_t; - // True if this is a multiset or multimap. - using is_multi_container = std::integral_constant; - using slot_policy = SlotPolicy; using slot_type = typename slot_policy::slot_type; using value_type = typename slot_policy::value_type; @@ -210,20 +384,35 @@ struct common_params { using reference = value_type &; using const_reference = const value_type &; + // For the given lookup key type, returns whether we can have multiple + // equivalent keys in the btree. If this is a multi-container, then we can. + // Otherwise, we can have multiple equivalent keys only if all of the + // following conditions are met: + // - The comparator is transparent. + // - The lookup key type is not the same as key_type. + // - The comparator is not a StringBtreeDefault{Less,Greater} comparator + // that we know has the same equivalence classes for all lookup types. + template + constexpr static bool can_have_multiple_equivalent_keys() { + return Multi || (IsTransparent::value && + !std::is_same::value && + !kIsKeyCompareStringAdapted); + } + enum { kTargetNodeSize = TargetNodeSize, - // Upper bound for the available space for values. This is largest for leaf + // Upper bound for the available space for slots. This is largest for leaf // nodes, which have overhead of at least a pointer + 4 bytes (for storing // 3 field_types and an enum). - kNodeValueSpace = + kNodeSlotSpace = TargetNodeSize - /*minimum overhead=*/(sizeof(void *) + 4), }; - // This is an integral type large enough to hold as many - // ValueSize-values as will fit a node of TargetNodeSize bytes. 
+ // This is an integral type large enough to hold as many slots as will fit a + // node of TargetNodeSize bytes. using node_count_type = - absl::conditional_t<(kNodeValueSpace / sizeof(value_type) > + absl::conditional_t<(kNodeSlotSpace / sizeof(slot_type) > (std::numeric_limits::max)()), uint16_t, uint8_t>; // NOLINT @@ -255,106 +444,6 @@ struct common_params { static void move(Alloc *alloc, slot_type *src, slot_type *dest) { slot_policy::move(alloc, src, dest); } - static void move(Alloc *alloc, slot_type *first, slot_type *last, - slot_type *result) { - slot_policy::move(alloc, first, last, result); - } -}; - -// A parameters structure for holding the type parameters for a btree_map. -// Compare and Alloc should be nothrow copy-constructible. -template -struct map_params : common_params> { - using super_type = typename map_params::common_params; - using mapped_type = Data; - // This type allows us to move keys when it is safe to do so. It is safe - // for maps in which value_type and mutable_value_type are layout compatible. - using slot_policy = typename super_type::slot_policy; - using slot_type = typename super_type::slot_type; - using value_type = typename super_type::value_type; - using init_type = typename super_type::init_type; - - using key_compare = typename super_type::key_compare; - // Inherit from key_compare for empty base class optimization. - struct value_compare : private key_compare { - value_compare() = default; - explicit value_compare(const key_compare &cmp) : key_compare(cmp) {} - - template - auto operator()(const T &left, const U &right) const - -> decltype(std::declval()(left.first, right.first)) { - return key_compare::operator()(left.first, right.first); - } - }; - using is_map_container = std::true_type; - - static const Key &key(const value_type &value) { return value.first; } - static const Key &key(const init_type &init) { return init.first; } - static const Key &key(const slot_type *s) { return slot_policy::key(s); } - static mapped_type &value(value_type *value) { return value->second; } -}; - -// This type implements the necessary functions from the -// absl::container_internal::slot_type interface. -template -struct set_slot_policy { - using slot_type = Key; - using value_type = Key; - using mutable_value_type = Key; - - static value_type &element(slot_type *slot) { return *slot; } - static const value_type &element(const slot_type *slot) { return *slot; } - - template - static void construct(Alloc *alloc, slot_type *slot, Args &&... args) { - absl::allocator_traits::construct(*alloc, slot, - std::forward(args)...); - } - - template - static void construct(Alloc *alloc, slot_type *slot, slot_type *other) { - absl::allocator_traits::construct(*alloc, slot, std::move(*other)); - } - - template - static void destroy(Alloc *alloc, slot_type *slot) { - absl::allocator_traits::destroy(*alloc, slot); - } - - template - static void swap(Alloc * /*alloc*/, slot_type *a, slot_type *b) { - using std::swap; - swap(*a, *b); - } - - template - static void move(Alloc * /*alloc*/, slot_type *src, slot_type *dest) { - *dest = std::move(*src); - } - - template - static void move(Alloc *alloc, slot_type *first, slot_type *last, - slot_type *result) { - for (slot_type *src = first, *dest = result; src != last; ++src, ++dest) - move(alloc, src, dest); - } -}; - -// A parameters structure for holding the type parameters for a btree_set. -// Compare and Alloc should be nothrow copy-constructible. 
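can_have_multiple_equivalent_keys() above captures a subtle point: even a non-multi btree can hold several keys that all compare equivalent to a single heterogeneous lookup key when the transparent comparator is coarser than key identity. A minimal, hypothetical sketch, loosely modelled on the MultiKey test type used earlier; all names are illustrative.

// Hypothetical sketch: a transparent comparator that only looks at part of
// the key lets one lookup key match several stored keys.
#include "absl/container/btree_set.h"

struct PairKey {
  int major;
  int minor;
};

struct MajorOnlyCmp {
  using is_transparent = void;  // enables heterogeneous lookup by int
  bool operator()(const PairKey& a, const PairKey& b) const {
    return a.major < b.major || (a.major == b.major && a.minor < b.minor);
  }
  bool operator()(const PairKey& a, int b) const { return a.major < b; }
  bool operator()(int a, const PairKey& b) const { return a < b.major; }
};

int main() {
  absl::btree_set<PairKey, MajorOnlyCmp> s;
  s.insert(PairKey{1, 1});
  s.insert(PairKey{1, 2});  // a distinct key: it differs in `minor`
  // Looking up by the int 1 compares equivalent to *both* elements, so
  // equal_range() spans two keys even though this is not a multiset.
  const auto range = s.equal_range(1);
  int matches = 0;
  for (auto it = range.first; it != range.second; ++it) ++matches;
  return matches == 2 ? 0 : 1;
}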
-template -struct set_params : common_params> { - using value_type = Key; - using slot_type = typename set_params::common_params::slot_type; - using value_compare = typename set_params::common_params::key_compare; - using is_map_container = std::false_type; - - static const Key &key(const value_type &value) { return value; } - static const Key &key(const slot_type *slot) { return *slot; } }; // An adapter class that converts a lower-bound compare into an upper-bound @@ -390,6 +479,10 @@ struct SearchResult { // useful information. template struct SearchResult { + SearchResult() {} + explicit SearchResult(V value) : value(value) {} + SearchResult(V value, MatchKind /*match*/) : value(value) {} + V value; static constexpr bool HasMatch() { return false; } @@ -402,10 +495,10 @@ struct SearchResult { template class btree_node { using is_key_compare_to = typename Params::is_key_compare_to; - using is_multi_container = typename Params::is_multi_container; using field_type = typename Params::node_count_type; using allocator_type = typename Params::allocator_type; using slot_type = typename Params::slot_type; + using original_key_compare = typename Params::original_key_compare; public: using params_type = Params; @@ -420,21 +513,35 @@ class btree_node { using difference_type = typename Params::difference_type; // Btree decides whether to use linear node search as follows: + // - If the comparator expresses a preference, use that. + // - If the key expresses a preference, use that. // - If the key is arithmetic and the comparator is std::less or // std::greater, choose linear. // - Otherwise, choose binary. // TODO(ezb): Might make sense to add condition(s) based on node-size. using use_linear_search = std::integral_constant< - bool, - std::is_arithmetic::value && - (std::is_same, key_compare>::value || - std::is_same, key_compare>::value)>; + bool, has_linear_node_search_preference::value + ? prefers_linear_node_search::value + : has_linear_node_search_preference::value + ? prefers_linear_node_search::value + : std::is_arithmetic::value && + (std::is_same, + original_key_compare>::value || + std::is_same, + original_key_compare>::value)>; - // This class is organized by gtl::Layout as if it had the following - // structure: + // This class is organized by absl::container_internal::Layout as if it had + // the following structure: // // A pointer to the node's parent. // btree_node *parent; // + // // When ABSL_BTREE_ENABLE_GENERATIONS is defined, we also have a + // // generation integer in order to check that when iterators are + // // used, they haven't been invalidated already. Only the generation on + // // the root is used, but we have one on each node because whether a node + // // is root or not can change. + // uint32_t generation; + // // // The position of the node in the node's parent. // field_type position; // // The index of the first populated value in `values`. @@ -445,23 +552,23 @@ class btree_node { // // is the same as the count of values. // field_type finish; // // The maximum number of values the node can hold. This is an integer in - // // [1, kNodeValues] for root leaf nodes, kNodeValues for non-root leaf + // // [1, kNodeSlots] for root leaf nodes, kNodeSlots for non-root leaf // // nodes, and kInternalNodeMaxCount (as a sentinel value) for internal - // // nodes (even though there are still kNodeValues values in the node). + // // nodes (even though there are still kNodeSlots values in the node). 
// // TODO(ezb): make max_count use only 4 bits and record log2(capacity) // // to free extra bits for is_root, etc. // field_type max_count; // // // The array of values. The capacity is `max_count` for leaf nodes and - // // kNodeValues for internal nodes. Only the values in + // // kNodeSlots for internal nodes. Only the values in // // [start, finish) have been initialized and are valid. // slot_type values[max_count]; // // // The array of child pointers. The keys in children[i] are all less // // than key(i). The keys in children[i + 1] are all greater than key(i). - // // There are 0 children for leaf nodes and kNodeValues + 1 children for + // // There are 0 children for leaf nodes and kNodeSlots + 1 children for // // internal nodes. - // btree_node *children[kNodeValues + 1]; + // btree_node *children[kNodeSlots + 1]; // // This class is only constructed by EmptyNodeType. Normally, pointers to the // layout above are allocated, cast to btree_node*, and de-allocated within @@ -481,59 +588,71 @@ class btree_node { btree_node() = default; private: - using layout_type = absl::container_internal::Layout; - constexpr static size_type SizeWithNValues(size_type n) { - return layout_type(/*parent*/ 1, - /*position, start, finish, max_count*/ 4, - /*values*/ n, - /*children*/ 0) + using layout_type = + absl::container_internal::Layout; + constexpr static size_type SizeWithNSlots(size_type n) { + return layout_type( + /*parent*/ 1, + /*generation*/ params_type::kEnableGenerations ? 1 : 0, + /*position, start, finish, max_count*/ 4, + /*slots*/ n, + /*children*/ 0) .AllocSize(); } - // A lower bound for the overhead of fields other than values in a leaf node. + // A lower bound for the overhead of fields other than slots in a leaf node. constexpr static size_type MinimumOverhead() { - return SizeWithNValues(1) - sizeof(value_type); + return SizeWithNSlots(1) - sizeof(slot_type); } // Compute how many values we can fit onto a leaf node taking into account // padding. - constexpr static size_type NodeTargetValues(const int begin, const int end) { + constexpr static size_type NodeTargetSlots(const int begin, const int end) { return begin == end ? begin - : SizeWithNValues((begin + end) / 2 + 1) > + : SizeWithNSlots((begin + end) / 2 + 1) > params_type::kTargetNodeSize - ? NodeTargetValues(begin, (begin + end) / 2) - : NodeTargetValues((begin + end) / 2 + 1, end); + ? NodeTargetSlots(begin, (begin + end) / 2) + : NodeTargetSlots((begin + end) / 2 + 1, end); } enum { kTargetNodeSize = params_type::kTargetNodeSize, - kNodeTargetValues = NodeTargetValues(0, params_type::kTargetNodeSize), + kNodeTargetSlots = NodeTargetSlots(0, params_type::kTargetNodeSize), - // We need a minimum of 3 values per internal node in order to perform + // We need a minimum of 3 slots per internal node in order to perform // splitting (1 value for the two nodes involved in the split and 1 value - // propagated to the parent as the delimiter for the split). - kNodeValues = kNodeTargetValues >= 3 ? kNodeTargetValues : 3, + // propagated to the parent as the delimiter for the split). For performance + // reasons, we don't allow 3 slots-per-node due to bad worst case occupancy + // of 1/3 (for a node, not a b-tree). + kMinNodeSlots = 4, + + kNodeSlots = + kNodeTargetSlots >= kMinNodeSlots ? kNodeTargetSlots : kMinNodeSlots, // The node is internal (i.e. is not a leaf node) if and only if `max_count` // has this value. kInternalNodeMaxCount = 0, }; - // Leaves can have less than kNodeValues values. 
- constexpr static layout_type LeafLayout(const int max_values = kNodeValues) { - return layout_type(/*parent*/ 1, - /*position, start, finish, max_count*/ 4, - /*values*/ max_values, - /*children*/ 0); + // Leaves can have less than kNodeSlots values. + constexpr static layout_type LeafLayout(const int slot_count = kNodeSlots) { + return layout_type( + /*parent*/ 1, + /*generation*/ params_type::kEnableGenerations ? 1 : 0, + /*position, start, finish, max_count*/ 4, + /*slots*/ slot_count, + /*children*/ 0); } constexpr static layout_type InternalLayout() { - return layout_type(/*parent*/ 1, - /*position, start, finish, max_count*/ 4, - /*values*/ kNodeValues, - /*children*/ kNodeValues + 1); + return layout_type( + /*parent*/ 1, + /*generation*/ params_type::kEnableGenerations ? 1 : 0, + /*position, start, finish, max_count*/ 4, + /*slots*/ kNodeSlots, + /*children*/ kNodeSlots + 1); } - constexpr static size_type LeafSize(const int max_values = kNodeValues) { - return LeafLayout(max_values).AllocSize(); + constexpr static size_type LeafSize(const int slot_count = kNodeSlots) { + return LeafLayout(slot_count).AllocSize(); } constexpr static size_type InternalSize() { return InternalLayout().AllocSize(); @@ -544,44 +663,47 @@ class btree_node { template inline typename layout_type::template ElementType *GetField() { // We assert that we don't read from values that aren't there. - assert(N < 3 || !leaf()); + assert(N < 4 || is_internal()); return InternalLayout().template Pointer(reinterpret_cast(this)); } template inline const typename layout_type::template ElementType *GetField() const { - assert(N < 3 || !leaf()); + assert(N < 4 || is_internal()); return InternalLayout().template Pointer( reinterpret_cast(this)); } void set_parent(btree_node *p) { *GetField<0>() = p; } - field_type &mutable_finish() { return GetField<1>()[2]; } - slot_type *slot(int i) { return &GetField<2>()[i]; } + field_type &mutable_finish() { return GetField<2>()[2]; } + slot_type *slot(int i) { return &GetField<3>()[i]; } slot_type *start_slot() { return slot(start()); } slot_type *finish_slot() { return slot(finish()); } - const slot_type *slot(int i) const { return &GetField<2>()[i]; } - void set_position(field_type v) { GetField<1>()[0] = v; } - void set_start(field_type v) { GetField<1>()[1] = v; } - void set_finish(field_type v) { GetField<1>()[2] = v; } + const slot_type *slot(int i) const { return &GetField<3>()[i]; } + void set_position(field_type v) { GetField<2>()[0] = v; } + void set_start(field_type v) { GetField<2>()[1] = v; } + void set_finish(field_type v) { GetField<2>()[2] = v; } // This method is only called by the node init methods. - void set_max_count(field_type v) { GetField<1>()[3] = v; } + void set_max_count(field_type v) { GetField<2>()[3] = v; } public: // Whether this is a leaf node or not. This value doesn't change after the // node is created. - bool leaf() const { return GetField<1>()[3] != kInternalNodeMaxCount; } + bool is_leaf() const { return GetField<2>()[3] != kInternalNodeMaxCount; } + // Whether this is an internal node or not. This value doesn't change after + // the node is created. + bool is_internal() const { return !is_leaf(); } // Getter for the position of this node in its parent. - field_type position() const { return GetField<1>()[0]; } + field_type position() const { return GetField<2>()[0]; } // Getter for the offset of the first value in the `values` array. 
field_type start() const { - // TODO(ezb): when floating storage is implemented, return GetField<1>()[1]; - assert(GetField<1>()[1] == 0); + // TODO(ezb): when floating storage is implemented, return GetField<2>()[1]; + assert(GetField<2>()[1] == 0); return 0; } // Getter for the offset after the last value in the `values` array. - field_type finish() const { return GetField<1>()[2]; } + field_type finish() const { return GetField<2>()[2]; } // Getters for the number of values stored in this node. field_type count() const { @@ -590,10 +712,10 @@ class btree_node { } field_type max_count() const { // Internal nodes have max_count==kInternalNodeMaxCount. - // Leaf nodes have max_count in [1, kNodeValues]. - const field_type max_count = GetField<1>()[3]; + // Leaf nodes have max_count in [1, kNodeSlots]. + const field_type max_count = GetField<2>()[3]; return max_count == field_type{kInternalNodeMaxCount} - ? field_type{kNodeValues} + ? field_type{kNodeSlots} : max_count; } @@ -602,21 +724,44 @@ class btree_node { // Getter for whether the node is the root of the tree. The parent of the // root of the tree is the leftmost node in the tree which is guaranteed to // be a leaf. - bool is_root() const { return parent()->leaf(); } + bool is_root() const { return parent()->is_leaf(); } void make_root() { assert(parent()->is_root()); + set_generation(parent()->generation()); set_parent(parent()->parent()); } + // Gets the root node's generation integer, which is the one used by the tree. + uint32_t *get_root_generation() const { + assert(params_type::kEnableGenerations); + const btree_node *curr = this; + for (; !curr->is_root(); curr = curr->parent()) continue; + return const_cast(&curr->GetField<1>()[0]); + } + + // Returns the generation for iterator validation. + uint32_t generation() const { + return params_type::kEnableGenerations ? *get_root_generation() : 0; + } + // Updates generation. Should only be called on a root node or during node + // initialization. + void set_generation(uint32_t generation) { + if (params_type::kEnableGenerations) GetField<1>()[0] = generation; + } + // Updates the generation. We do this whenever the node is mutated. + void next_generation() { + if (params_type::kEnableGenerations) ++*get_root_generation(); + } + // Getters for the key/value at position i in the node. const key_type &key(int i) const { return params_type::key(slot(i)); } reference value(int i) { return params_type::element(slot(i)); } const_reference value(int i) const { return params_type::element(slot(i)); } // Getters/setter for the child at position i in the node. 
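// The generation machinery above keeps one counter per tree (reached through the
// root node) and bumps it on every mutation so iterators can detect that they
// were invalidated. A simplified standalone model of that pattern; Tree and Iter
// are illustrative names, and the counter lives directly on the container here
// instead of on the root node.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

struct Tree {
  std::vector<int> values;
  std::uint32_t generation = 0;

  void insert(int v) {
    values.push_back(v);
    ++generation;  // next_generation(): any mutation invalidates iterators
  }
};

struct Iter {
  const Tree *tree;
  std::size_t pos;
  std::uint32_t generation;  // snapshot taken when the iterator was created

  int operator*() const {
    // assert_valid_generation(): fail loudly instead of reading stale state.
    assert(tree->generation == generation && "iterator invalidated");
    return tree->values[pos];
  }
};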
- btree_node *child(int i) const { return GetField<3>()[i]; } + btree_node *child(int i) const { return GetField<4>()[i]; } btree_node *start_child() const { return child(start()); } - btree_node *&mutable_child(int i) { return GetField<3>()[i]; } + btree_node *&mutable_child(int i) { return GetField<4>()[i]; } void clear_child(int i) { absl::container_internal::SanitizerPoisonObject(&mutable_child(i)); } @@ -671,7 +816,7 @@ class btree_node { } ++s; } - return {s}; + return SearchResult{s}; } // Returns the position of the first value whose key is not less than k using @@ -706,7 +851,7 @@ class btree_node { e = mid; } } - return {s}; + return SearchResult{s}; } // Returns the position of the first value whose key is not less than k using @@ -715,7 +860,7 @@ class btree_node { SearchResult binary_search_impl( const K &k, int s, int e, const CompareTo &comp, std::true_type /* IsCompareTo */) const { - if (is_multi_container::value) { + if (params_type::template can_have_multiple_equivalent_keys()) { MatchKind exact_match = MatchKind::kNe; while (s != e) { const int mid = (s + e) >> 1; @@ -726,14 +871,14 @@ class btree_node { e = mid; if (c == 0) { // Need to return the first value whose key is not less than k, - // which requires continuing the binary search if this is a - // multi-container. + // which requires continuing the binary search if there could be + // multiple equivalent keys. exact_match = MatchKind::kEq; } } } return {s, exact_match}; - } else { // Not a multi-container. + } else { // Can't have multiple equivalent keys. while (s != e) { const int mid = (s + e) >> 1; const absl::weak_ordering c = comp(key(mid), k); @@ -754,14 +899,10 @@ class btree_node { template void emplace_value(size_type i, allocator_type *alloc, Args &&... args); - // Removes the value at position i, shifting all existing values and children - // at positions > i to the left by 1. - void remove_value(int i, allocator_type *alloc); - - // Removes the values at positions [i, i + to_erase), shifting all values - // after that range to the left by to_erase. Does not change children at all. - void remove_values_ignore_children(int i, int to_erase, - allocator_type *alloc); + // Removes the values at positions [i, i + to_erase), shifting all existing + // values and children after that range to the left by to_erase. Clears all + // children between [i, i + to_erase). + void remove_values(field_type i, field_type to_erase, allocator_type *alloc); // Rebalances a node with its right sibling. void rebalance_right_to_left(int to_move, btree_node *right, @@ -773,11 +914,12 @@ class btree_node { void split(int insert_position, btree_node *dest, allocator_type *alloc); // Merges a node with its right sibling, moving all of the values and the - // delimiting key in the parent node onto itself. + // delimiting key in the parent node onto itself, and deleting the src node. void merge(btree_node *src, allocator_type *alloc); // Node allocation/deletion routines. - void init_leaf(btree_node *parent, int max_count) { + void init_leaf(int max_count, btree_node *parent) { + set_generation(0); set_parent(parent); set_position(0); set_start(0); @@ -787,78 +929,96 @@ class btree_node { start_slot(), max_count * sizeof(slot_type)); } void init_internal(btree_node *parent) { - init_leaf(parent, kNodeValues); + init_leaf(kNodeSlots, parent); // Set `max_count` to a sentinel value to indicate that this node is // internal. 
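// A compact standalone version of the three-way-comparator binary search above,
// specialized for the unique-key case where the search can stop as soon as the
// comparison yields "equal". SearchResultSketch and MatchKindSketch are
// illustrative stand-ins for the SearchResult/MatchKind types used by btree_node;
// when equivalent keys are allowed, the real code keeps narrowing leftward after
// a hit instead of returning immediately.
#include <vector>

enum class MatchKindSketch { kEq, kNe };

struct SearchResultSketch {
  int position;
  MatchKindSketch match;
};

SearchResultSketch BinarySearchThreeWay(const std::vector<int> &sorted, int key) {
  int s = 0;
  int e = static_cast<int>(sorted.size());
  while (s != e) {
    const int mid = (s + e) >> 1;
    // Three-way comparison: <0 means key is to the right, >0 means to the left.
    const int c = sorted[mid] < key ? -1 : key < sorted[mid] ? 1 : 0;
    if (c < 0) {
      s = mid + 1;
    } else if (c > 0) {
      e = mid;
    } else {
      // With unique keys an exact hit is already the lower bound.
      return {mid, MatchKindSketch::kEq};
    }
  }
  return {s, MatchKindSketch::kNe};
}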
set_max_count(kInternalNodeMaxCount); absl::container_internal::SanitizerPoisonMemoryRegion( - &mutable_child(start()), (kNodeValues + 1) * sizeof(btree_node *)); - } - void destroy(allocator_type *alloc) { - for (int i = start(); i < finish(); ++i) { - value_destroy(i, alloc); - } + &mutable_child(start()), (kNodeSlots + 1) * sizeof(btree_node *)); } - public: - // Exposed only for tests. - static bool testonly_uses_linear_node_search() { - return use_linear_search::value; + static void deallocate(const size_type size, btree_node *node, + allocator_type *alloc) { + absl::container_internal::Deallocate(alloc, node, size); } + // Deletes a node and all of its children. + static void clear_and_delete(btree_node *node, allocator_type *alloc); + private: template - void value_init(const size_type i, allocator_type *alloc, Args &&... args) { + void value_init(const field_type i, allocator_type *alloc, Args &&... args) { + next_generation(); absl::container_internal::SanitizerUnpoisonObject(slot(i)); params_type::construct(alloc, slot(i), std::forward(args)...); } - void value_destroy(const size_type i, allocator_type *alloc) { + void value_destroy(const field_type i, allocator_type *alloc) { + next_generation(); params_type::destroy(alloc, slot(i)); absl::container_internal::SanitizerPoisonObject(slot(i)); } - - // Transfers value from slot `src_i` in `src` to slot `dest_i` in `this`. - void transfer(const size_type dest_i, const size_type src_i, btree_node *src, - allocator_type *alloc) { - absl::container_internal::SanitizerUnpoisonObject(slot(dest_i)); - params_type::transfer(alloc, slot(dest_i), src->slot(src_i)); - absl::container_internal::SanitizerPoisonObject(src->slot(src_i)); - } - - // Move n values starting at value i in this node into the values starting at - // value j in dest_node. - void uninitialized_move_n(const size_type n, const size_type i, - const size_type j, btree_node *dest_node, - allocator_type *alloc) { - absl::container_internal::SanitizerUnpoisonMemoryRegion( - dest_node->slot(j), n * sizeof(slot_type)); - for (slot_type *src = slot(i), *end = src + n, *dest = dest_node->slot(j); - src != end; ++src, ++dest) { - params_type::construct(alloc, dest, src); + void value_destroy_n(const field_type i, const field_type n, + allocator_type *alloc) { + next_generation(); + for (slot_type *s = slot(i), *end = slot(i + n); s != end; ++s) { + params_type::destroy(alloc, s); + absl::container_internal::SanitizerPoisonObject(s); } } - // Destroys a range of n values, starting at index i. - void value_destroy_n(const size_type i, const size_type n, - allocator_type *alloc) { - for (int j = 0; j < n; ++j) { - value_destroy(i + j, alloc); + static void transfer(slot_type *dest, slot_type *src, allocator_type *alloc) { + absl::container_internal::SanitizerUnpoisonObject(dest); + params_type::transfer(alloc, dest, src); + absl::container_internal::SanitizerPoisonObject(src); + } + + // Transfers value from slot `src_i` in `src_node` to slot `dest_i` in `this`. + void transfer(const size_type dest_i, const size_type src_i, + btree_node *src_node, allocator_type *alloc) { + next_generation(); + transfer(slot(dest_i), src_node->slot(src_i), alloc); + } + + // Transfers `n` values starting at value `src_i` in `src_node` into the + // values starting at value `dest_i` in `this`. 
+ void transfer_n(const size_type n, const size_type dest_i, + const size_type src_i, btree_node *src_node, + allocator_type *alloc) { + next_generation(); + for (slot_type *src = src_node->slot(src_i), *end = src + n, + *dest = slot(dest_i); + src != end; ++src, ++dest) { + transfer(dest, src, alloc); + } + } + + // Same as above, except that we start at the end and work our way to the + // beginning. + void transfer_n_backward(const size_type n, const size_type dest_i, + const size_type src_i, btree_node *src_node, + allocator_type *alloc) { + next_generation(); + for (slot_type *src = src_node->slot(src_i + n - 1), *end = src - n, + *dest = slot(dest_i + n - 1); + src != end; --src, --dest) { + transfer(dest, src, alloc); } } template friend class btree; template - friend struct btree_iterator; + friend class btree_iterator; friend class BtreeNodePeer; + friend struct btree_access; }; template -struct btree_iterator { - private: +class btree_iterator { using key_type = typename Node::key_type; using size_type = typename Node::size_type; using params_type = typename Node::params_type; + using is_map_container = typename params_type::is_map_container; using node_type = Node; using normal_node = typename std::remove_const::type; @@ -870,7 +1030,7 @@ struct btree_iterator { using slot_type = typename params_type::slot_type; using iterator = - btree_iterator; + btree_iterator; using const_iterator = btree_iterator; @@ -882,66 +1042,51 @@ struct btree_iterator { using reference = Reference; using iterator_category = std::bidirectional_iterator_tag; - btree_iterator() : node(nullptr), position(-1) {} - explicit btree_iterator(Node *n) : node(n), position(n->start()) {} - btree_iterator(Node *n, int p) : node(n), position(p) {} + btree_iterator() : btree_iterator(nullptr, -1) {} + explicit btree_iterator(Node *n) : btree_iterator(n, n->start()) {} + btree_iterator(Node *n, int p) : node_(n), position_(p) { +#ifdef ABSL_BTREE_ENABLE_GENERATIONS + // Use `~uint32_t{}` as a sentinel value for iterator generations so it + // doesn't match the initial value for the actual generation. + generation_ = n != nullptr ? n->generation() : ~uint32_t{}; +#endif + } // NOTE: this SFINAE allows for implicit conversions from iterator to - // const_iterator, but it specifically avoids defining copy constructors so - // that btree_iterator can be trivially copyable. This is for performance and - // binary size reasons. + // const_iterator, but it specifically avoids hiding the copy constructor so + // that the trivial one will be used when possible. template , iterator>::value && std::is_same::value, int> = 0> - btree_iterator(const btree_iterator &other) // NOLINT - : node(other.node), position(other.position) {} - - private: - // This SFINAE allows explicit conversions from const_iterator to - // iterator, but also avoids defining a copy constructor. - // NOTE: the const_cast is safe because this constructor is only called by - // non-const methods and the container owns the nodes. - template , const_iterator>::value && - std::is_same::value, - int> = 0> - explicit btree_iterator(const btree_iterator &other) - : node(const_cast(other.node)), position(other.position) {} - - // Increment/decrement the iterator. 
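// transfer_n copies slots front-to-back while transfer_n_backward copies
// back-to-front; the backward order is what makes an in-place shift to the right
// safe when the source and destination ranges overlap (as in emplace_value and
// remove_values). A minimal sketch of that rule with plain ints; ShiftLeft and
// ShiftRight are illustrative names, not btree helpers.
#include <cstddef>

// dest < src: copying front-to-back is safe even if the ranges overlap
// (the transfer_n case).
void ShiftLeft(int *data, std::size_t src, std::size_t dest, std::size_t n) {
  for (std::size_t i = 0; i < n; ++i) data[dest + i] = data[src + i];
}

// dest > src: an overlapping shift to the right must walk backwards
// (the transfer_n_backward case), or earlier writes would clobber unread slots.
void ShiftRight(int *data, std::size_t src, std::size_t dest, std::size_t n) {
  for (std::size_t i = n; i-- > 0;) data[dest + i] = data[src + i];
}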
- void increment() { - if (node->leaf() && ++position < node->finish()) { - return; - } - increment_slow(); + btree_iterator(const btree_iterator other) // NOLINT + : node_(other.node_), position_(other.position_) { +#ifdef ABSL_BTREE_ENABLE_GENERATIONS + generation_ = other.generation_; +#endif } - void increment_slow(); - void decrement() { - if (node->leaf() && --position >= node->start()) { - return; - } - decrement_slow(); + bool operator==(const iterator &other) const { + return node_ == other.node_ && position_ == other.position_; } - void decrement_slow(); - - public: bool operator==(const const_iterator &other) const { - return node == other.node && position == other.position; + return node_ == other.node_ && position_ == other.position_; + } + bool operator!=(const iterator &other) const { + return node_ != other.node_ || position_ != other.position_; } bool operator!=(const const_iterator &other) const { - return node != other.node || position != other.position; + return node_ != other.node_ || position_ != other.position_; } // Accessors for the key/value the iterator is pointing at. reference operator*() const { - ABSL_HARDENING_ASSERT(node != nullptr); - ABSL_HARDENING_ASSERT(node->start() <= position); - ABSL_HARDENING_ASSERT(node->finish() > position); - return node->value(position); + ABSL_HARDENING_ASSERT(node_ != nullptr); + ABSL_HARDENING_ASSERT(node_->start() <= position_); + ABSL_HARDENING_ASSERT(node_->finish() > position_); + assert_valid_generation(); + return node_->value(position_); } pointer operator->() const { return &operator*(); } @@ -965,6 +1110,8 @@ struct btree_iterator { } private: + friend iterator; + friend const_iterator; template friend class btree; template @@ -975,32 +1122,95 @@ struct btree_iterator { friend class btree_map_container; template friend class btree_multiset_container; - template - friend struct btree_iterator; template friend class base_checker; + friend struct btree_access; - const key_type &key() const { return node->key(position); } - slot_type *slot() { return node->slot(position); } + // This SFINAE allows explicit conversions from const_iterator to + // iterator, but also avoids hiding the copy constructor. + // NOTE: the const_cast is safe because this constructor is only called by + // non-const methods and the container owns the nodes. + template , const_iterator>::value && + std::is_same::value, + int> = 0> + explicit btree_iterator(const btree_iterator other) + : node_(const_cast(other.node_)), + position_(other.position_) { +#ifdef ABSL_BTREE_ENABLE_GENERATIONS + generation_ = other.generation_; +#endif + } + + // Increment/decrement the iterator. + void increment() { + assert_valid_generation(); + if (node_->is_leaf() && ++position_ < node_->finish()) { + return; + } + increment_slow(); + } + void increment_slow(); + + void decrement() { + assert_valid_generation(); + if (node_->is_leaf() && --position_ >= node_->start()) { + return; + } + decrement_slow(); + } + void decrement_slow(); + + // Updates the generation. For use internally right before we return an + // iterator to the user. 
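// Sketch of the SFINAE note above: the iterator to const_iterator conversion is a
// constrained *template* constructor, so the implicitly-declared copy constructor
// is not suppressed and the iterator stays trivially copyable. ItSketch and Ref
// are illustrative names, not the real btree_iterator.
#include <type_traits>

template <typename T, typename Ref>
struct ItSketch {
  T *pos;

  ItSketch() = default;
  explicit ItSketch(T *p) : pos(p) {}

  // Participates only when this instantiation is the const iterator and the
  // argument is the mutable one; a template never counts as a copy constructor.
  template <typename R = Ref,
            typename std::enable_if<
                std::is_const<typename std::remove_reference<R>::type>::value,
                int>::type = 0>
  ItSketch(const ItSketch<T, T &> &other) : pos(other.pos) {}  // NOLINT
};

static_assert(std::is_trivially_copyable<ItSketch<int, const int &>>::value,
              "the trivial copy constructor is still used");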
+ void update_generation() { +#ifdef ABSL_BTREE_ENABLE_GENERATIONS + if (node_ != nullptr) generation_ = node_->generation(); +#endif + } + + const key_type &key() const { return node_->key(position_); } + slot_type *slot() { return node_->slot(position_); } + + void assert_valid_generation() const { +#ifdef ABSL_BTREE_ENABLE_GENERATIONS + if (node_ != nullptr && node_->generation() != generation_) { + ABSL_INTERNAL_LOG( + FATAL, + "Attempting to use an invalidated iterator. The corresponding b-tree " + "container has been mutated since this iterator was constructed."); + } +#endif + } // The node in the tree the iterator is pointing at. - Node *node; + Node *node_; // The position within the node of the tree the iterator is pointing at. // NOTE: this is an int rather than a field_type because iterators can point // to invalid positions (such as -1) in certain circumstances. - int position; + int position_; +#ifdef ABSL_BTREE_ENABLE_GENERATIONS + // Used to check that the iterator hasn't been invalidated. + uint32_t generation_; +#endif }; template class btree { using node_type = btree_node; using is_key_compare_to = typename Params::is_key_compare_to; + using init_type = typename Params::init_type; + using field_type = typename node_type::field_type; // We use a static empty node for the root/leftmost/rightmost of empty btrees // in order to avoid branching in begin()/end(). struct alignas(node_type::Alignment()) EmptyNodeType : node_type { using field_type = typename node_type::field_type; node_type *parent; +#ifdef ABSL_BTREE_ENABLE_GENERATIONS + uint32_t generation = 0; +#endif field_type position = 0; field_type start = 0; field_type finish = 0; @@ -1029,9 +1239,9 @@ class btree { #endif } - enum { - kNodeValues = node_type::kNodeValues, - kMinNodeValues = kNodeValues / 2, + enum : uint32_t { + kNodeSlots = node_type::kNodeSlots, + kMinNodeValues = kNodeSlots / 2, }; struct node_stats { @@ -1055,13 +1265,15 @@ class btree { using size_type = typename Params::size_type; using difference_type = typename Params::difference_type; using key_compare = typename Params::key_compare; + using original_key_compare = typename Params::original_key_compare; using value_compare = typename Params::value_compare; using allocator_type = typename Params::allocator_type; using reference = typename Params::reference; using const_reference = typename Params::const_reference; using pointer = typename Params::pointer; using const_pointer = typename Params::const_pointer; - using iterator = btree_iterator; + using iterator = + typename btree_iterator::iterator; using const_iterator = typename iterator::const_iterator; using reverse_iterator = std::reverse_iterator; using const_reverse_iterator = std::reverse_iterator; @@ -1074,28 +1286,46 @@ class btree { private: // For use in copy_or_move_values_in_order. const value_type &maybe_move_from_iterator(const_iterator it) { return *it; } - value_type &&maybe_move_from_iterator(iterator it) { return std::move(*it); } + value_type &&maybe_move_from_iterator(iterator it) { + // This is a destructive operation on the other container so it's safe for + // us to const_cast and move from the keys here even if it's a set. + return std::move(const_cast(*it)); + } // Copies or moves (depending on the template parameter) the values in // other into this btree in their order in other. This btree must be empty // before this method is called. This method is used in copy construction, // copy assignment, and move assignment. 
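// The EmptyNodeType above gives every empty btree a shared, statically allocated
// sentinel node so that begin()/end() never have to branch on emptiness. A
// stripped-down sketch of the same sentinel idea; SentinelNode and TinyTree are
// illustrative names.
struct SentinelNode {
  SentinelNode *parent;
  int count;
};

inline SentinelNode *EmptySentinel() {
  // Like the btree's empty node, the sentinel is its own parent and is shared by
  // every empty container.
  static SentinelNode empty = {&empty, 0};
  return &empty;
}

struct TinyTree {
  SentinelNode *root = EmptySentinel();
  bool empty() const { return root == EmptySentinel(); }
};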
template - void copy_or_move_values_in_order(Btree *other); + void copy_or_move_values_in_order(Btree &other); // Validates that various assumptions/requirements are true at compile time. constexpr static bool static_assert_validation(); public: - btree(const key_compare &comp, const allocator_type &alloc); + btree(const key_compare &comp, const allocator_type &alloc) + : root_(comp, alloc, EmptyNode()), rightmost_(EmptyNode()), size_(0) {} - btree(const btree &other); + btree(const btree &other) : btree(other, other.allocator()) {} + btree(const btree &other, const allocator_type &alloc) + : btree(other.key_comp(), alloc) { + copy_or_move_values_in_order(other); + } btree(btree &&other) noexcept : root_(std::move(other.root_)), rightmost_(absl::exchange(other.rightmost_, EmptyNode())), size_(absl::exchange(other.size_, 0)) { other.mutable_root() = EmptyNode(); } + btree(btree &&other, const allocator_type &alloc) + : btree(other.key_comp(), alloc) { + if (alloc == other.allocator()) { + swap(other); + } else { + // Move values from `other` one at a time when allocators are different. + copy_or_move_values_in_order(other); + } + } ~btree() { // Put static_asserts in destructor to avoid triggering them before the type @@ -1123,17 +1353,22 @@ class btree { return const_reverse_iterator(begin()); } - // Finds the first element whose key is not less than key. + // Finds the first element whose key is not less than `key`. template iterator lower_bound(const K &key) { - return internal_end(internal_lower_bound(key)); + return internal_end(internal_lower_bound(key).value); } template const_iterator lower_bound(const K &key) const { - return internal_end(internal_lower_bound(key)); + return internal_end(internal_lower_bound(key).value); } - // Finds the first element whose key is greater than key. + // Finds the first element whose key is not less than `key` and also returns + // whether that element is equal to `key`. + template + std::pair lower_bound_equal(const K &key) const; + + // Finds the first element whose key is greater than `key`. template iterator upper_bound(const K &key) { return internal_end(internal_upper_bound(key)); @@ -1144,23 +1379,21 @@ class btree { } // Finds the range of values which compare equal to key. The first member of - // the returned pair is equal to lower_bound(key). The second member pair of - // the pair is equal to upper_bound(key). + // the returned pair is equal to lower_bound(key). The second member of the + // pair is equal to upper_bound(key). template - std::pair equal_range(const K &key) { - return {lower_bound(key), upper_bound(key)}; - } + std::pair equal_range(const K &key); template std::pair equal_range(const K &key) const { - return {lower_bound(key), upper_bound(key)}; + return const_cast(this)->equal_range(key); } // Inserts a value into the btree only if it does not already exist. The // boolean return value indicates whether insertion succeeded or failed. // Requirement: if `key` already exists in the btree, does not consume `args`. // Requirement: `key` is never referenced after consuming `args`. - template - std::pair insert_unique(const key_type &key, Args &&... args); + template + std::pair insert_unique(const K &key, Args &&... args); // Inserts with hint. Checks to see if the value should be placed immediately // before `position` in the tree. If so, then the insertion will take @@ -1168,14 +1401,23 @@ class btree { // logarithmic time as if a call to insert_unique() were made. 
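// equal_range above is driven by a single lower_bound_equal probe: if the lower
// bound does not compare equal the range is empty, and with unique keys the range
// is closed by one increment instead of a second descent for upper_bound. A rough
// sketch of that shape over a sorted vector, unique keys assumed; the helper
// names are illustrative, not btree APIs.
#include <algorithm>
#include <iterator>
#include <utility>
#include <vector>

using VecIter = std::vector<int>::const_iterator;

// One search returns the lower bound plus whether it is an exact match.
std::pair<VecIter, bool> LowerBoundEqualSketch(const std::vector<int> &v,
                                               int key) {
  const VecIter lower = std::lower_bound(v.begin(), v.end(), key);
  return {lower, lower != v.end() && !(key < *lower)};
}

std::pair<VecIter, VecIter> EqualRangeUniqueSketch(const std::vector<int> &v,
                                                   int key) {
  const std::pair<VecIter, bool> res = LowerBoundEqualSketch(v, key);
  if (!res.second) return {res.first, res.first};  // no match: empty range
  return {res.first, std::next(res.first)};        // unique keys: width one
}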
// Requirement: if `key` already exists in the btree, does not consume `args`. // Requirement: `key` is never referenced after consuming `args`. - template + template std::pair insert_hint_unique(iterator position, - const key_type &key, + const K &key, Args &&... args); // Insert a range of values into the btree. + // Note: the first overload avoids constructing a value_type if the key + // already exists in the btree. + template ()( + params_type::key(*std::declval()), + std::declval()))> + void insert_iterator_unique(InputIterator b, InputIterator e, int); + // We need the second overload for cases in which we need to construct a + // value_type in order to compare it with the keys already in the btree. template - void insert_iterator_unique(InputIterator b, InputIterator e); + void insert_iterator_unique(InputIterator b, InputIterator e, char); // Inserts a value into the btree. template @@ -1208,18 +1450,8 @@ class btree { // to the element after the last erased element. std::pair erase_range(iterator begin, iterator end); - // Erases the specified key from the btree. Returns 1 if an element was - // erased and 0 otherwise. - template - size_type erase_unique(const K &key); - - // Erases all of the entries matching the specified key from the - // btree. Returns the number of elements erased. - template - size_type erase_multi(const K &key); - - // Finds the iterator corresponding to a key or returns end() if the key is - // not present. + // Finds an element with key equivalent to `key` or returns `end()` if `key` + // is not present. template iterator find(const K &key) { return internal_end(internal_find(key)); @@ -1229,23 +1461,6 @@ class btree { return internal_end(internal_find(key)); } - // Returns a count of the number of times the key appears in the btree. - template - size_type count_unique(const K &key) const { - const iterator begin = internal_find(key); - if (begin.node == nullptr) { - // The key doesn't exist in the tree. - return 0; - } - return 1; - } - // Returns a count of the number of times the key appears in the btree. - template - size_type count_multi(const K &key) const { - const auto range = equal_range(key); - return std::distance(range.first, range.second); - } - // Clear the btree, deleting all of the values it contains. void clear(); @@ -1260,7 +1475,9 @@ class btree { return compare_internal::compare_result_as_less_than(key_comp()(a, b)); } - value_compare value_comp() const { return value_compare(key_comp()); } + value_compare value_comp() const { + return value_compare(original_key_compare(key_comp())); + } // Verifies the structure of the btree. void verify() const; @@ -1298,6 +1515,7 @@ class btree { } // The total number of bytes used by the btree. + // TODO(b/169338300): update to support node_btree_*. size_type bytes_used() const { node_stats stats = internal_stats(root()); if (stats.leaf_nodes == 1 && stats.internal_nodes == 0) { @@ -1308,12 +1526,14 @@ class btree { } } - // The average number of bytes used per value stored in the btree. + // The average number of bytes used per value stored in the btree assuming + // random insertion order. static double average_bytes_per_value() { - // Returns the number of bytes per value on a leaf node that is 75% - // full. Experimentally, this matches up nicely with the computed number of - // bytes per value in trees that had their values inserted in random order. 
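// The two insert_iterator_unique overloads above are ranked by the dummy int/char
// parameter: callers pass the literal 0, so the int overload wins whenever its
// decltype condition (the element's key can be compared directly) is well-formed,
// and the char overload is the fallback. A small standalone demo of that dispatch
// mechanism; the nested key_type check below is only a stand-in for the real
// decltype condition, and the names are illustrative.
#include <iostream>

// Preferred overload: only participates when T has a nested key_type.
template <typename T, typename = typename T::key_type>
void DescribeInsertPath(const T &, int) {
  std::cout << "key available: no temporary value needed\n";
}

// Fallback: an int argument converts to char, so this ranks lower and is picked
// only when the first overload is removed by SFINAE.
template <typename T>
void DescribeInsertPath(const T &, char) {
  std::cout << "construct a value first, then take its key\n";
}

struct WithKey { using key_type = int; };
struct WithoutKey {};

int main() {
  DescribeInsertPath(WithKey{}, 0);     // picks the int overload
  DescribeInsertPath(WithoutKey{}, 0);  // falls back to the char overload
  return 0;
}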
- return node_type::LeafSize() / (kNodeValues * 0.75); + // The expected number of values per node with random insertion order is the + // average of the maximum and minimum numbers of values per node. + const double expected_values_per_node = + (kNodeSlots + kMinNodeValues) / 2.0; + return node_type::LeafSize() / expected_values_per_node; } // The fullness of the btree. Computed as the number of elements in the btree @@ -1323,7 +1543,7 @@ class btree { // Returns 0 for empty trees. double fullness() const { if (empty()) return 0.0; - return static_cast(size()) / (nodes() * kNodeValues); + return static_cast(size()) / (nodes() * kNodeSlots); } // The overhead of the btree structure in bytes per node. Computed as the // total number of bytes used by the btree minus the number of bytes used for @@ -1339,6 +1559,8 @@ class btree { allocator_type get_allocator() const { return allocator(); } private: + friend struct btree_access; + // Internal accessor routines. node_type *root() { return root_.template get<2>(); } const node_type *root() const { return root_.template get<2>(); } @@ -1373,35 +1595,18 @@ class btree { } node_type *new_leaf_node(node_type *parent) { node_type *n = allocate(node_type::LeafSize()); - n->init_leaf(parent, kNodeValues); + n->init_leaf(kNodeSlots, parent); return n; } node_type *new_leaf_root_node(const int max_count) { node_type *n = allocate(node_type::LeafSize(max_count)); - n->init_leaf(/*parent=*/n, max_count); + n->init_leaf(max_count, /*parent=*/n); return n; } // Deletion helper routines. - void erase_same_node(iterator begin, iterator end); - iterator erase_from_leaf_node(iterator begin, size_type to_erase); iterator rebalance_after_delete(iterator iter); - // Deallocates a node of a certain size in bytes using the allocator. - void deallocate(const size_type size, node_type *node) { - absl::container_internal::Deallocate( - mutable_allocator(), node, size); - } - - void delete_internal_node(node_type *node) { - node->destroy(mutable_allocator()); - deallocate(node_type::InternalSize(), node); - } - void delete_leaf_node(node_type *node) { - node->destroy(mutable_allocator()); - deallocate(node_type::LeafSize(node->max_count()), node); - } - // Rebalances or splits the node iter points to. void rebalance_or_split(iterator *iter); @@ -1419,10 +1624,10 @@ class btree { void try_shrink(); iterator internal_end(iterator iter) { - return iter.node != nullptr ? iter : end(); + return iter.node_ != nullptr ? iter : end(); } const_iterator internal_end(const_iterator iter) const { - return iter.node != nullptr ? iter : end(); + return iter.node_ != nullptr ? iter : end(); } // Emplaces a value into the btree immediately before iter. Requires that @@ -1432,35 +1637,25 @@ class btree { // Returns an iterator pointing to the first value >= the value "iter" is // pointing at. Note that "iter" might be pointing to an invalid location such - // as iter.position == iter.node->finish(). This routine simply moves iter up - // in the tree to a valid location. - // Requires: iter.node is non-null. + // as iter.position_ == iter.node_->finish(). This routine simply moves iter + // up in the tree to a valid location. Requires: iter.node_ is non-null. template static IterType internal_last(IterType iter); // Returns an iterator pointing to the leaf position at which key would - // reside in the tree. We provide 2 versions of internal_locate. The first - // version uses a less-than comparator and is incapable of distinguishing when - // there is an exact match. 
The second version is for the key-compare-to - // specialization and distinguishes exact matches. The key-compare-to - // specialization allows the caller to avoid a subsequent comparison to - // determine if an exact match was made, which is important for keys with - // expensive comparison, such as strings. + // reside in the tree, unless there is an exact match - in which case, the + // result may not be on a leaf. When there's a three-way comparator, we can + // return whether there was an exact match. This allows the caller to avoid a + // subsequent comparison to determine if an exact match was made, which is + // important for keys with expensive comparison, such as strings. template SearchResult internal_locate( const K &key) const; - template - SearchResult internal_locate_impl( - const K &key, std::false_type /* IsCompareTo */) const; - - template - SearchResult internal_locate_impl( - const K &key, std::true_type /* IsCompareTo */) const; - // Internal routine which implements lower_bound(). template - iterator internal_lower_bound(const K &key) const; + SearchResult internal_lower_bound( + const K &key) const; // Internal routine which implements upper_bound(). template @@ -1470,9 +1665,6 @@ class btree { template iterator internal_find(const K &key) const; - // Deletes a node and all of its children. - void internal_clear(node_type *node); - // Verifies the tree structure of node. int internal_verify(const node_type *node, const key_type *lo, const key_type *hi) const; @@ -1482,7 +1674,7 @@ class btree { if (node == nullptr || (node == root() && empty())) { return node_stats(0, 0); } - if (node->leaf()) { + if (node->is_leaf()) { return node_stats(1, 0); } node_stats res(0, 1); @@ -1492,13 +1684,6 @@ class btree { return res; } - public: - // Exposed only for tests. - static bool testonly_uses_linear_node_search() { - return node_type::testonly_uses_linear_node_search(); - } - - private: // We use compressed tuple in order to save space because key_compare and // allocator_type are usually empty. absl::container_internal::CompressedTuple::emplace_value(const size_type i, // Shift old values to create space for new value and then construct it in // place. if (i < finish()) { - value_init(finish(), alloc, slot(finish() - 1)); - for (size_type j = finish() - 1; j > i; --j) - params_type::move(alloc, slot(j - 1), slot(j)); - value_destroy(i, alloc); + transfer_n_backward(finish() - i, /*dest_i=*/i + 1, /*src_i=*/i, this, + alloc); } value_init(i, alloc, std::forward(args)...); set_finish(finish() + 1); - if (!leaf() && finish() > i + 1) { - for (int j = finish(); j > i + 1; --j) { + if (is_internal() && finish() > i + 1) { + for (field_type j = finish(); j > i + 1; --j) { set_child(j, child(j - 1)); } clear_child(i + 1); @@ -1542,24 +1725,27 @@ inline void btree_node
<P>
::emplace_value(const size_type i, } template -inline void btree_node
<P>
::remove_value(const int i, allocator_type *alloc) { - if (!leaf() && finish() > i + 1) { - assert(child(i + 1)->count() == 0); - for (size_type j = i + 1; j < finish(); ++j) { - set_child(j, child(j + 1)); +inline void btree_node
<P>
::remove_values(const field_type i, + const field_type to_erase, + allocator_type *alloc) { + // Transfer values after the removed range into their new places. + value_destroy_n(i, to_erase, alloc); + const field_type orig_finish = finish(); + const field_type src_i = i + to_erase; + transfer_n(orig_finish - src_i, i, src_i, this, alloc); + + if (is_internal()) { + // Delete all children between begin and end. + for (int j = 0; j < to_erase; ++j) { + clear_and_delete(child(i + j + 1), alloc); + } + // Rotate children after end into new positions. + for (int j = i + to_erase + 1; j <= orig_finish; ++j) { + set_child(j - to_erase, child(j)); + clear_child(j); } - clear_child(finish()); } - - remove_values_ignore_children(i, /*to_erase=*/1, alloc); -} - -template -inline void btree_node
<P>
::remove_values_ignore_children( - const int i, const int to_erase, allocator_type *alloc) { - params_type::move(alloc, slot(i + to_erase), finish_slot(), slot(i)); - value_destroy_n(finish() - to_erase, to_erase, alloc); - set_finish(finish() - to_erase); + set_finish(orig_finish - to_erase); } template @@ -1573,24 +1759,19 @@ void btree_node
<P>
::rebalance_right_to_left(const int to_move, assert(to_move <= right->count()); // 1) Move the delimiting value in the parent to the left node. - value_init(finish(), alloc, parent()->slot(position())); + transfer(finish(), position(), parent(), alloc); // 2) Move the (to_move - 1) values from the right node to the left node. - right->uninitialized_move_n(to_move - 1, right->start(), finish() + 1, this, - alloc); + transfer_n(to_move - 1, finish() + 1, right->start(), right, alloc); // 3) Move the new delimiting value to the parent from the right node. - params_type::move(alloc, right->slot(to_move - 1), - parent()->slot(position())); + parent()->transfer(position(), right->start() + to_move - 1, right, alloc); - // 4) Shift the values in the right node to their correct position. - params_type::move(alloc, right->slot(to_move), right->finish_slot(), - right->start_slot()); + // 4) Shift the values in the right node to their correct positions. + right->transfer_n(right->count() - to_move, right->start(), + right->start() + to_move, right, alloc); - // 5) Destroy the now-empty to_move entries in the right node. - right->value_destroy_n(right->finish() - to_move, to_move, alloc); - - if (!leaf()) { + if (is_internal()) { // Move the child pointers from the right to the left node. for (int i = 0; i < to_move; ++i) { init_child(finish() + i + 1, right->child(i)); @@ -1623,56 +1804,21 @@ void btree_node
<P>
::rebalance_left_to_right(const int to_move, // Lastly, a new delimiting value is moved from the left node into the // parent, and the remaining empty left node entries are destroyed. - if (right->count() >= to_move) { - // The original location of the right->count() values are sufficient to hold - // the new to_move entries from the parent and left node. + // 1) Shift existing values in the right node to their correct positions. + right->transfer_n_backward(right->count(), right->start() + to_move, + right->start(), right, alloc); - // 1) Shift existing values in the right node to their correct positions. - right->uninitialized_move_n(to_move, right->finish() - to_move, - right->finish(), right, alloc); - for (slot_type *src = right->slot(right->finish() - to_move - 1), - *dest = right->slot(right->finish() - 1), - *end = right->start_slot(); - src >= end; --src, --dest) { - params_type::move(alloc, src, dest); - } + // 2) Move the delimiting value in the parent to the right node. + right->transfer(right->start() + to_move - 1, position(), parent(), alloc); - // 2) Move the delimiting value in the parent to the right node. - params_type::move(alloc, parent()->slot(position()), - right->slot(to_move - 1)); - - // 3) Move the (to_move - 1) values from the left node to the right node. - params_type::move(alloc, slot(finish() - (to_move - 1)), finish_slot(), - right->start_slot()); - } else { - // The right node does not have enough initialized space to hold the new - // to_move entries, so part of them will move to uninitialized space. - - // 1) Shift existing values in the right node to their correct positions. - right->uninitialized_move_n(right->count(), right->start(), - right->start() + to_move, right, alloc); - - // 2) Move the delimiting value in the parent to the right node. - right->value_init(to_move - 1, alloc, parent()->slot(position())); - - // 3) Move the (to_move - 1) values from the left node to the right node. - const size_type uninitialized_remaining = to_move - right->count() - 1; - uninitialized_move_n(uninitialized_remaining, - finish() - uninitialized_remaining, right->finish(), - right, alloc); - params_type::move(alloc, slot(finish() - (to_move - 1)), - slot(finish() - uninitialized_remaining), - right->start_slot()); - } + // 3) Move the (to_move - 1) values from the left node to the right node. + right->transfer_n(to_move - 1, right->start(), finish() - (to_move - 1), this, + alloc); // 4) Move the new delimiting value to the parent from the left node. - params_type::move(alloc, slot(finish() - to_move), - parent()->slot(position())); + parent()->transfer(position(), finish() - to_move, this, alloc); - // 5) Destroy the now-empty to_move entries in the left node. - value_destroy_n(finish() - to_move, to_move, alloc); - - if (!leaf()) { + if (is_internal()) { // Move the child pointers from the left to the right node. for (int i = right->finish(); i >= right->start(); --i) { right->init_child(i + to_move, right->child(i)); @@ -1693,7 +1839,7 @@ template void btree_node
<P>
::split(const int insert_position, btree_node *dest, allocator_type *alloc) { assert(dest->count() == 0); - assert(max_count() == kNodeValues); + assert(max_count() == kNodeSlots); // We bias the split based on the position being inserted. If we're // inserting at the beginning of the left node then bias the split to put @@ -1701,7 +1847,7 @@ void btree_node
<P>
::split(const int insert_position, btree_node *dest, // right node then bias the split to put more values on the left node. if (insert_position == start()) { dest->set_finish(dest->start() + finish() - 1); - } else if (insert_position == kNodeValues) { + } else if (insert_position == kNodeSlots) { dest->set_finish(dest->start()); } else { dest->set_finish(dest->start() + count() / 2); @@ -1710,10 +1856,7 @@ void btree_node
<P>
::split(const int insert_position, btree_node *dest, assert(count() >= 1); // Move values from the left sibling to the right sibling. - uninitialized_move_n(dest->count(), finish(), dest->start(), dest, alloc); - - // Destroy the now-empty entries in the left node. - value_destroy_n(finish(), dest->count(), alloc); + dest->transfer_n(dest->count(), dest->start(), finish(), this, alloc); // The split key is the largest value in the left sibling. --mutable_finish(); @@ -1721,7 +1864,7 @@ void btree_node
<P>
::split(const int insert_position, btree_node *dest, value_destroy(finish(), alloc); parent()->init_child(position() + 1, dest); - if (!leaf()) { + if (is_internal()) { for (int i = dest->start(), j = finish() + 1; i <= dest->finish(); ++i, ++j) { assert(child(j) != nullptr); @@ -1740,13 +1883,9 @@ void btree_node
<P>
::merge(btree_node *src, allocator_type *alloc) { value_init(finish(), alloc, parent()->slot(position())); // Move the values from the right to the left node. - src->uninitialized_move_n(src->count(), src->start(), finish() + 1, this, - alloc); + transfer_n(src->count(), finish() + 1, src->start(), src, alloc); - // Destroy the now-empty entries in the right node. - src->value_destroy_n(src->start(), src->count(), alloc); - - if (!leaf()) { + if (is_internal()) { // Move the child pointers from the right to the left node. for (int i = src->start(), j = finish() + 1; i <= src->finish(); ++i, ++j) { init_child(j, src->child(i)); @@ -1758,57 +1897,124 @@ void btree_node
<P>
::merge(btree_node *src, allocator_type *alloc) { set_finish(start() + 1 + count() + src->count()); src->set_finish(src->start()); - // Remove the value on the parent node. - parent()->remove_value(position(), alloc); + // Remove the value on the parent node and delete the src node. + parent()->remove_values(position(), /*to_erase=*/1, alloc); +} + +template +void btree_node
<P>
::clear_and_delete(btree_node *node, allocator_type *alloc) { + if (node->is_leaf()) { + node->value_destroy_n(node->start(), node->count(), alloc); + deallocate(LeafSize(node->max_count()), node, alloc); + return; + } + if (node->count() == 0) { + deallocate(InternalSize(), node, alloc); + return; + } + + // The parent of the root of the subtree we are deleting. + btree_node *delete_root_parent = node->parent(); + + // Navigate to the leftmost leaf under node, and then delete upwards. + while (node->is_internal()) node = node->start_child(); +#ifdef ABSL_BTREE_ENABLE_GENERATIONS + // When generations are enabled, we delete the leftmost leaf last in case it's + // the parent of the root and we need to check whether it's a leaf before we + // can update the root's generation. + // TODO(ezb): if we change btree_node::is_root to check a bool inside the node + // instead of checking whether the parent is a leaf, we can remove this logic. + btree_node *leftmost_leaf = node; +#endif + // Use `int` because `pos` needs to be able to hold `kNodeSlots+1`, which + // isn't guaranteed to be a valid `field_type`. + int pos = node->position(); + btree_node *parent = node->parent(); + for (;;) { + // In each iteration of the next loop, we delete one leaf node and go right. + assert(pos <= parent->finish()); + do { + node = parent->child(pos); + if (node->is_internal()) { + // Navigate to the leftmost leaf under node. + while (node->is_internal()) node = node->start_child(); + pos = node->position(); + parent = node->parent(); + } + node->value_destroy_n(node->start(), node->count(), alloc); +#ifdef ABSL_BTREE_ENABLE_GENERATIONS + if (leftmost_leaf != node) +#endif + deallocate(LeafSize(node->max_count()), node, alloc); + ++pos; + } while (pos <= parent->finish()); + + // Once we've deleted all children of parent, delete parent and go up/right. + assert(pos > parent->finish()); + do { + node = parent; + pos = node->position(); + parent = node->parent(); + node->value_destroy_n(node->start(), node->count(), alloc); + deallocate(InternalSize(), node, alloc); + if (parent == delete_root_parent) { +#ifdef ABSL_BTREE_ENABLE_GENERATIONS + deallocate(LeafSize(leftmost_leaf->max_count()), leftmost_leaf, alloc); +#endif + return; + } + ++pos; + } while (pos > parent->finish()); + } } //// // btree_iterator methods template void btree_iterator::increment_slow() { - if (node->leaf()) { - assert(position >= node->finish()); + if (node_->is_leaf()) { + assert(position_ >= node_->finish()); btree_iterator save(*this); - while (position == node->finish() && !node->is_root()) { - assert(node->parent()->child(node->position()) == node); - position = node->position(); - node = node->parent(); + while (position_ == node_->finish() && !node_->is_root()) { + assert(node_->parent()->child(node_->position()) == node_); + position_ = node_->position(); + node_ = node_->parent(); } // TODO(ezb): assert we aren't incrementing end() instead of handling. 
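// clear_and_delete above frees a whole subtree without recursion by walking down
// to the leftmost leaf and then sweeping right and upward via position/parent
// pointers. A simplified sketch of recursion-free deletion that uses an explicit
// stack instead of parent pointers; the bookkeeping differs, but the goal is the
// same: bounded call-stack depth even on very deep trees. TreeNodeSketch and
// ClearAndDeleteSketch are illustrative names.
#include <vector>

struct TreeNodeSketch {
  std::vector<TreeNodeSketch *> children;  // empty for leaves
};

void ClearAndDeleteSketch(TreeNodeSketch *root) {
  std::vector<TreeNodeSketch *> pending = {root};
  while (!pending.empty()) {
    TreeNodeSketch *node = pending.back();
    pending.pop_back();
    // Queue the children before freeing the node itself.
    for (TreeNodeSketch *child : node->children) pending.push_back(child);
    delete node;
  }
}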
- if (position == node->finish()) { + if (position_ == node_->finish()) { *this = save; } } else { - assert(position < node->finish()); - node = node->child(position + 1); - while (!node->leaf()) { - node = node->start_child(); + assert(position_ < node_->finish()); + node_ = node_->child(position_ + 1); + while (node_->is_internal()) { + node_ = node_->start_child(); } - position = node->start(); + position_ = node_->start(); } } template void btree_iterator::decrement_slow() { - if (node->leaf()) { - assert(position <= -1); + if (node_->is_leaf()) { + assert(position_ <= -1); btree_iterator save(*this); - while (position < node->start() && !node->is_root()) { - assert(node->parent()->child(node->position()) == node); - position = node->position() - 1; - node = node->parent(); + while (position_ < node_->start() && !node_->is_root()) { + assert(node_->parent()->child(node_->position()) == node_); + position_ = node_->position() - 1; + node_ = node_->parent(); } // TODO(ezb): assert we aren't decrementing begin() instead of handling. - if (position < node->start()) { + if (position_ < node_->start()) { *this = save; } } else { - assert(position >= node->start()); - node = node->child(position); - while (!node->leaf()) { - node = node->child(node->finish()); + assert(position_ >= node_->start()); + node_ = node_->child(position_); + while (node_->is_internal()) { + node_ = node_->child(node_->finish()); } - position = node->finish() - 1; + position_ = node_->finish() - 1; } } @@ -1816,7 +2022,7 @@ void btree_iterator::decrement_slow() { // btree methods template template -void btree
<P>
::copy_or_move_values_in_order(Btree *other) { +void btree
<P>
::copy_or_move_values_in_order(Btree &other) { static_assert(std::is_same::value || std::is_same::value, "Btree type must be same or const."); @@ -1824,11 +2030,11 @@ void btree
<P>
::copy_or_move_values_in_order(Btree *other) { // We can avoid key comparisons because we know the order of the // values is the same order we'll store them in. - auto iter = other->begin(); - if (iter == other->end()) return; + auto iter = other.begin(); + if (iter == other.end()) return; insert_multi(maybe_move_from_iterator(iter)); ++iter; - for (; iter != other->end(); ++iter) { + for (; iter != other.end(); ++iter) { // If the btree is not empty, we can just insert the new value at the end // of the tree. internal_emplace(end(), maybe_move_from_iterator(iter)); @@ -1847,19 +2053,16 @@ constexpr bool btree
<P>
::static_assert_validation() { // Note: We assert that kTargetValues, which is computed from // Params::kTargetNodeSize, must fit the node_type::field_type. static_assert( - kNodeValues < (1 << (8 * sizeof(typename node_type::field_type))), + kNodeSlots < (1 << (8 * sizeof(typename node_type::field_type))), "target node size too large"); // Verify that key_compare returns an absl::{weak,strong}_ordering or bool. - using compare_result_type = - absl::result_of_t; static_assert( - std::is_same::value || - std::is_convertible::value, + compare_has_valid_result_type(), "key comparison function must return absl::{weak,strong}_ordering or " "bool."); - // Test the assumption made in setting kNodeValueSpace. + // Test the assumption made in setting kNodeSlotSpace. static_assert(node_type::MinimumOverhead() >= sizeof(void *) + 4, "node space assumption incorrect"); @@ -1867,25 +2070,57 @@ constexpr bool btree
<P>
::static_assert_validation() { } template -btree
<P>
::btree(const key_compare &comp, const allocator_type &alloc) - : root_(comp, alloc, EmptyNode()), rightmost_(EmptyNode()), size_(0) {} - -template -btree
<P>
::btree(const btree &other) - : btree(other.key_comp(), other.allocator()) { - copy_or_move_values_in_order(&other); +template +auto btree
<P>
::lower_bound_equal(const K &key) const + -> std::pair { + const SearchResult res = + internal_lower_bound(key); + const iterator lower = iterator(internal_end(res.value)); + const bool equal = res.HasMatch() + ? res.IsEq() + : lower != end() && !compare_keys(key, lower.key()); + return {lower, equal}; } template -template -auto btree
<P>
::insert_unique(const key_type &key, Args &&... args) +template +auto btree
<P>
::equal_range(const K &key) -> std::pair { + const std::pair lower_and_equal = lower_bound_equal(key); + const iterator lower = lower_and_equal.first; + if (!lower_and_equal.second) { + return {lower, lower}; + } + + const iterator next = std::next(lower); + if (!params_type::template can_have_multiple_equivalent_keys()) { + // The next iterator after lower must point to a key greater than `key`. + // Note: if this assert fails, then it may indicate that the comparator does + // not meet the equivalence requirements for Compare + // (see https://en.cppreference.com/w/cpp/named_req/Compare). + assert(next == end() || compare_keys(key, next.key())); + return {lower, next}; + } + // Try once more to avoid the call to upper_bound() if there's only one + // equivalent key. This should prevent all calls to upper_bound() in cases of + // unique-containers with heterogeneous comparators in which all comparison + // operators have the same equivalence classes. + if (next == end() || compare_keys(key, next.key())) return {lower, next}; + + // In this case, we need to call upper_bound() to avoid worst case O(N) + // behavior if we were to iterate over equal keys. + return {lower, upper_bound(key)}; +} + +template +template +auto btree
<P>
::insert_unique(const K &key, Args &&... args) -> std::pair { if (empty()) { mutable_root() = rightmost_ = new_leaf_root_node(1); } - auto res = internal_locate(key); - iterator &iter = res.value; + SearchResult res = internal_locate(key); + iterator iter = res.value; if (res.HasMatch()) { if (res.IsEq()) { @@ -1894,7 +2129,7 @@ auto btree
<P>
::insert_unique(const key_type &key, Args &&... args) } } else { iterator last = internal_last(iter); - if (last.node && !compare_keys(key, last.key())) { + if (last.node_ && !compare_keys(key, last.key())) { // The key already exists in the tree, do nothing. return {last, false}; } @@ -1903,8 +2138,8 @@ auto btree
<P>
::insert_unique(const key_type &key, Args &&... args) } template -template -inline auto btree
<P>
::insert_hint_unique(iterator position, const key_type &key, +template +inline auto btree
<P>
::insert_hint_unique(iterator position, const K &key, Args &&... args) -> std::pair { if (!empty()) { @@ -1928,13 +2163,22 @@ inline auto btree
<P>
::insert_hint_unique(iterator position, const key_type &key, } template -template -void btree
<P>
::insert_iterator_unique(InputIterator b, InputIterator e) { +template +void btree
<P>
::insert_iterator_unique(InputIterator b, InputIterator e, int) { for (; b != e; ++b) { insert_hint_unique(end(), params_type::key(*b), *b); } } +template +template +void btree
<P>
::insert_iterator_unique(InputIterator b, InputIterator e, char) { + for (; b != e; ++b) { + init_type value(*b); + insert_hint_unique(end(), params_type::key(value), std::move(value)); + } +} + template template auto btree
<P>
::insert_multi(const key_type &key, ValueType &&v) -> iterator { @@ -1943,7 +2187,7 @@ auto btree
<P>
::insert_multi(const key_type &key, ValueType &&v) -> iterator { } iterator iter = internal_upper_bound(key); - if (iter.node == nullptr) { + if (iter.node_ == nullptr) { iter = end(); } return internal_emplace(iter, std::forward(v)); @@ -1990,7 +2234,7 @@ auto btree
<P>
::operator=(const btree &other) -> btree & { *mutable_allocator() = other.allocator(); } - copy_or_move_values_in_order(&other); + copy_or_move_values_in_order(other); } return *this; } @@ -2020,7 +2264,7 @@ auto btree
<P>
::operator=(btree &&other) noexcept -> btree & { // comparator while moving the values so we can't swap the key // comparators. *mutable_key_comp() = other.key_comp(); - copy_or_move_values_in_order(&other); + copy_or_move_values_in_order(other); } } } @@ -2030,21 +2274,22 @@ auto btree
<P>
::operator=(btree &&other) noexcept -> btree & { template auto btree
<P>
::erase(iterator iter) -> iterator { bool internal_delete = false; - if (!iter.node->leaf()) { + if (iter.node_->is_internal()) { // Deletion of a value on an internal node. First, move the largest value - // from our left child here, then delete that position (in remove_value() + // from our left child here, then delete that position (in remove_values() // below). We can get to the largest value from our left child by // decrementing iter. iterator internal_iter(iter); --iter; - assert(iter.node->leaf()); - params_type::move(mutable_allocator(), iter.node->slot(iter.position), - internal_iter.node->slot(internal_iter.position)); + assert(iter.node_->is_leaf()); + params_type::move(mutable_allocator(), iter.node_->slot(iter.position_), + internal_iter.node_->slot(internal_iter.position_)); internal_delete = true; } // Delete the key from the leaf. - iter.node->remove_value(iter.position, mutable_allocator()); + iter.node_->remove_values(iter.position_, /*to_erase=*/1, + mutable_allocator()); --size_; // We want to return the next value after the one we just erased. If we @@ -2052,7 +2297,7 @@ auto btree
<P>
::erase(iterator iter) -> iterator { // value is ++(++iter). If we erased from a leaf node (internal_delete == // false) then the next value is ++iter. Note that ++iter may point to an // internal node and the value in the internal node may move to a leaf node - // (iter.node) when rebalancing is performed at the leaf level. + // (iter.node_) when rebalancing is performed at the leaf level. iterator res = rebalance_after_delete(iter); @@ -2069,14 +2314,14 @@ auto btree
<P>
::rebalance_after_delete(iterator iter) -> iterator { iterator res(iter); bool first_iteration = true; for (;;) { - if (iter.node == root()) { + if (iter.node_ == root()) { try_shrink(); if (empty()) { return end(); } break; } - if (iter.node->count() >= kMinNodeValues) { + if (iter.node_->count() >= kMinNodeValues) { break; } bool merged = try_merge_or_rebalance(&iter); @@ -2089,14 +2334,15 @@ auto btree
<P>
::rebalance_after_delete(iterator iter) -> iterator { if (!merged) { break; } - iter.position = iter.node->position(); - iter.node = iter.node->parent(); + iter.position_ = iter.node_->position(); + iter.node_ = iter.node_->parent(); } + res.update_generation(); // Adjust our return value. If we're pointing at the end of a node, advance // the iterator. - if (res.position == res.node->finish()) { - res.position = res.node->finish() - 1; + if (res.position_ == res.node_->finish()) { + res.position_ = res.node_->finish() - 1; ++res; } @@ -2113,105 +2359,43 @@ auto btree
<P>
::erase_range(iterator begin, iterator end) return {0, begin}; } - if (count == size_) { + if (static_cast(count) == size_) { clear(); return {count, this->end()}; } - if (begin.node == end.node) { - erase_same_node(begin, end); + if (begin.node_ == end.node_) { + assert(end.position_ > begin.position_); + begin.node_->remove_values(begin.position_, end.position_ - begin.position_, + mutable_allocator()); size_ -= count; return {count, rebalance_after_delete(begin)}; } const size_type target_size = size_ - count; while (size_ > target_size) { - if (begin.node->leaf()) { + if (begin.node_->is_leaf()) { const size_type remaining_to_erase = size_ - target_size; - const size_type remaining_in_node = begin.node->finish() - begin.position; - begin = erase_from_leaf_node( - begin, (std::min)(remaining_to_erase, remaining_in_node)); + const size_type remaining_in_node = + begin.node_->finish() - begin.position_; + const size_type to_erase = + (std::min)(remaining_to_erase, remaining_in_node); + begin.node_->remove_values(begin.position_, to_erase, + mutable_allocator()); + size_ -= to_erase; + begin = rebalance_after_delete(begin); } else { begin = erase(begin); } } + begin.update_generation(); return {count, begin}; } -template -void btree
<P>
::erase_same_node(iterator begin, iterator end) { - assert(begin.node == end.node); - assert(end.position > begin.position); - - node_type *node = begin.node; - size_type to_erase = end.position - begin.position; - if (!node->leaf()) { - // Delete all children between begin and end. - for (size_type i = 0; i < to_erase; ++i) { - internal_clear(node->child(begin.position + i + 1)); - } - // Rotate children after end into new positions. - for (size_type i = begin.position + to_erase + 1; i <= node->finish(); - ++i) { - node->set_child(i - to_erase, node->child(i)); - node->clear_child(i); - } - } - node->remove_values_ignore_children(begin.position, to_erase, - mutable_allocator()); - - // Do not need to update rightmost_, because - // * either end == this->end(), and therefore node == rightmost_, and still - // exists - // * or end != this->end(), and therefore rightmost_ hasn't been erased, since - // it wasn't covered in [begin, end) -} - -template -auto btree
<P>
::erase_from_leaf_node(iterator begin, size_type to_erase) - -> iterator { - node_type *node = begin.node; - assert(node->leaf()); - assert(node->finish() > begin.position); - assert(begin.position + to_erase <= node->finish()); - - node->remove_values_ignore_children(begin.position, to_erase, - mutable_allocator()); - - size_ -= to_erase; - - return rebalance_after_delete(begin); -} - -template -template -auto btree

::erase_unique(const K &key) -> size_type { - const iterator iter = internal_find(key); - if (iter.node == nullptr) { - // The key doesn't exist in the tree, return nothing done. - return 0; - } - erase(iter); - return 1; -} - -template -template -auto btree

::erase_multi(const K &key) -> size_type { - const iterator begin = internal_lower_bound(key); - if (begin.node == nullptr) { - // The key doesn't exist in the tree, return nothing done. - return 0; - } - // Delete all of the keys between begin and upper_bound(key). - const iterator end = internal_end(internal_upper_bound(key)); - return erase_range(begin, end).first; -} - template void btree

::clear() {
   if (!empty()) {
-    internal_clear(root());
+    node_type::clear_and_delete(root(), mutable_allocator());
   }
   mutable_root() = EmptyNode();
   rightmost_ = EmptyNode();
@@ -2241,18 +2425,18 @@ void btree

::verify() const {
   assert(leftmost() != nullptr);
   assert(rightmost_ != nullptr);
   assert(empty() || size() == internal_verify(root(), nullptr, nullptr));
-  assert(leftmost() == (++const_iterator(root(), -1)).node);
-  assert(rightmost_ == (--const_iterator(root(), root()->finish())).node);
-  assert(leftmost()->leaf());
-  assert(rightmost_->leaf());
+  assert(leftmost() == (++const_iterator(root(), -1)).node_);
+  assert(rightmost_ == (--const_iterator(root(), root()->finish())).node_);
+  assert(leftmost()->is_leaf());
+  assert(rightmost_->is_leaf());
 }

 template
 void btree

::rebalance_or_split(iterator *iter) { - node_type *&node = iter->node; - int &insert_position = iter->position; + node_type *&node = iter->node_; + int &insert_position = iter->position_; assert(node->count() == node->max_count()); - assert(kNodeValues == node->max_count()); + assert(kNodeSlots == node->max_count()); // First try to make room on the node by rebalancing. node_type *parent = node->parent(); @@ -2260,17 +2444,17 @@ void btree

::rebalance_or_split(iterator *iter) { if (node->position() > parent->start()) { // Try rebalancing with our left sibling. node_type *left = parent->child(node->position() - 1); - assert(left->max_count() == kNodeValues); - if (left->count() < kNodeValues) { + assert(left->max_count() == kNodeSlots); + if (left->count() < kNodeSlots) { // We bias rebalancing based on the position being inserted. If we're // inserting at the end of the right node then we bias rebalancing to // fill up the left node. - int to_move = (kNodeValues - left->count()) / - (1 + (insert_position < kNodeValues)); + int to_move = (kNodeSlots - left->count()) / + (1 + (insert_position < static_cast(kNodeSlots))); to_move = (std::max)(1, to_move); if (insert_position - to_move >= node->start() || - left->count() + to_move < kNodeValues) { + left->count() + to_move < static_cast(kNodeSlots)) { left->rebalance_right_to_left(to_move, node, mutable_allocator()); assert(node->max_count() - node->count() == to_move); @@ -2289,17 +2473,17 @@ void btree
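
The to_move computation above encodes the bias described in the comment as plain integer arithmetic. A standalone restatement with made-up numbers; BiasedToMove, the slot count of 62, and the sibling counts are illustrative stand-ins, not values taken from the library:

#include <algorithm>
#include <cstdio>

// Rough restatement of the bias used when shifting values into a non-full
// left sibling before splitting a full node.  If the insertion lands at the
// end of the full node, move as much as possible to the left (divisor 1);
// otherwise split the slack roughly in half (divisor 2).
int BiasedToMove(int node_slots, int left_count, int insert_position) {
  int to_move = (node_slots - left_count) /
                (1 + (insert_position < node_slots ? 1 : 0));
  return std::max(1, to_move);
}

int main() {
  // Full node with 62 slots, left sibling currently holds 30 values:
  std::printf("%d\n", BiasedToMove(62, 30, 62));  // inserting at the end -> 32
  std::printf("%d\n", BiasedToMove(62, 30, 10));  // inserting mid-node   -> 16
  return 0;
}
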

::rebalance_or_split(iterator *iter) { if (node->position() < parent->finish()) { // Try rebalancing with our right sibling. node_type *right = parent->child(node->position() + 1); - assert(right->max_count() == kNodeValues); - if (right->count() < kNodeValues) { + assert(right->max_count() == kNodeSlots); + if (right->count() < kNodeSlots) { // We bias rebalancing based on the position being inserted. If we're // inserting at the beginning of the left node then we bias rebalancing // to fill up the right node. - int to_move = (kNodeValues - right->count()) / + int to_move = (static_cast(kNodeSlots) - right->count()) / (1 + (insert_position > node->start())); to_move = (std::max)(1, to_move); if (insert_position <= node->finish() - to_move || - right->count() + to_move < kNodeValues) { + right->count() + to_move < static_cast(kNodeSlots)) { node->rebalance_left_to_right(to_move, right, mutable_allocator()); if (insert_position > node->finish()) { @@ -2315,8 +2499,8 @@ void btree

::rebalance_or_split(iterator *iter) { // Rebalancing failed, make sure there is room on the parent node for a new // value. - assert(parent->max_count() == kNodeValues); - if (parent->count() == kNodeValues) { + assert(parent->max_count() == kNodeSlots); + if (parent->count() == kNodeSlots) { iterator parent_iter(node->parent(), node->position()); rebalance_or_split(&parent_iter); } @@ -2325,16 +2509,17 @@ void btree

::rebalance_or_split(iterator *iter) { // Create a new root node and set the current root node as the child of the // new root. parent = new_internal_node(parent); + parent->set_generation(root()->generation()); parent->init_child(parent->start(), root()); mutable_root() = parent; // If the former root was a leaf node, then it's now the rightmost node. - assert(!parent->start_child()->leaf() || + assert(parent->start_child()->is_internal() || parent->start_child() == rightmost_); } // Split the node. node_type *split_node; - if (node->leaf()) { + if (node->is_leaf()) { split_node = new_leaf_node(parent); node->split(insert_position, split_node, mutable_allocator()); if (rightmost_ == node) rightmost_ = split_node; @@ -2352,60 +2537,56 @@ void btree

::rebalance_or_split(iterator *iter) { template void btree

::merge_nodes(node_type *left, node_type *right) { left->merge(right, mutable_allocator()); - if (right->leaf()) { - if (rightmost_ == right) rightmost_ = left; - delete_leaf_node(right); - } else { - delete_internal_node(right); - } + if (rightmost_ == right) rightmost_ = left; } template bool btree

::try_merge_or_rebalance(iterator *iter) { - node_type *parent = iter->node->parent(); - if (iter->node->position() > parent->start()) { + node_type *parent = iter->node_->parent(); + if (iter->node_->position() > parent->start()) { // Try merging with our left sibling. - node_type *left = parent->child(iter->node->position() - 1); - assert(left->max_count() == kNodeValues); - if (1 + left->count() + iter->node->count() <= kNodeValues) { - iter->position += 1 + left->count(); - merge_nodes(left, iter->node); - iter->node = left; + node_type *left = parent->child(iter->node_->position() - 1); + assert(left->max_count() == kNodeSlots); + if (1U + left->count() + iter->node_->count() <= kNodeSlots) { + iter->position_ += 1 + left->count(); + merge_nodes(left, iter->node_); + iter->node_ = left; return true; } } - if (iter->node->position() < parent->finish()) { + if (iter->node_->position() < parent->finish()) { // Try merging with our right sibling. - node_type *right = parent->child(iter->node->position() + 1); - assert(right->max_count() == kNodeValues); - if (1 + iter->node->count() + right->count() <= kNodeValues) { - merge_nodes(iter->node, right); + node_type *right = parent->child(iter->node_->position() + 1); + assert(right->max_count() == kNodeSlots); + if (1U + iter->node_->count() + right->count() <= kNodeSlots) { + merge_nodes(iter->node_, right); return true; } // Try rebalancing with our right sibling. We don't perform rebalancing if - // we deleted the first element from iter->node and the node is not + // we deleted the first element from iter->node_ and the node is not // empty. This is a small optimization for the common pattern of deleting // from the front of the tree. if (right->count() > kMinNodeValues && - (iter->node->count() == 0 || iter->position > iter->node->start())) { - int to_move = (right->count() - iter->node->count()) / 2; + (iter->node_->count() == 0 || iter->position_ > iter->node_->start())) { + int to_move = (right->count() - iter->node_->count()) / 2; to_move = (std::min)(to_move, right->count() - 1); - iter->node->rebalance_right_to_left(to_move, right, mutable_allocator()); + iter->node_->rebalance_right_to_left(to_move, right, mutable_allocator()); return false; } } - if (iter->node->position() > parent->start()) { + if (iter->node_->position() > parent->start()) { // Try rebalancing with our left sibling. We don't perform rebalancing if - // we deleted the last element from iter->node and the node is not + // we deleted the last element from iter->node_ and the node is not // empty. This is a small optimization for the common pattern of deleting // from the back of the tree. - node_type *left = parent->child(iter->node->position() - 1); + node_type *left = parent->child(iter->node_->position() - 1); if (left->count() > kMinNodeValues && - (iter->node->count() == 0 || iter->position < iter->node->finish())) { - int to_move = (left->count() - iter->node->count()) / 2; + (iter->node_->count() == 0 || + iter->position_ < iter->node_->finish())) { + int to_move = (left->count() - iter->node_->count()) / 2; to_move = (std::min)(to_move, left->count() - 1); - left->rebalance_left_to_right(to_move, iter->node, mutable_allocator()); - iter->position += to_move; + left->rebalance_left_to_right(to_move, iter->node_, mutable_allocator()); + iter->position_ += to_move; return false; } } @@ -2414,34 +2595,35 @@ bool btree
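
The merge condition above (1 + left->count() + right->count() <= kNodeSlots) counts the separator key that gets pulled down from the parent when two siblings are joined. A tiny standalone check; CanMerge and the slot count of 62 are illustrative assumptions:

#include <cstdio>

// Two siblings can be merged when their value counts, plus the one separator
// key pulled down from the parent, fit in a single node.
bool CanMerge(int left_count, int right_count, int node_slots = 62) {
  return 1 + left_count + right_count <= node_slots;
}

int main() {
  std::printf("%d\n", CanMerge(30, 31));  // 1: 62 values fit exactly
  std::printf("%d\n", CanMerge(31, 31));  // 0: 63 would overflow, so rebalance instead
  return 0;
}
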

::try_merge_or_rebalance(iterator *iter) { template void btree

::try_shrink() { - if (root()->count() > 0) { + node_type *orig_root = root(); + if (orig_root->count() > 0) { return; } // Deleted the last item on the root node, shrink the height of the tree. - if (root()->leaf()) { + if (orig_root->is_leaf()) { assert(size() == 0); - delete_leaf_node(root()); mutable_root() = rightmost_ = EmptyNode(); } else { - node_type *child = root()->start_child(); + node_type *child = orig_root->start_child(); child->make_root(); - delete_internal_node(root()); mutable_root() = child; } + node_type::clear_and_delete(orig_root, mutable_allocator()); } template template inline IterType btree

::internal_last(IterType iter) { - assert(iter.node != nullptr); - while (iter.position == iter.node->finish()) { - iter.position = iter.node->position(); - iter.node = iter.node->parent(); - if (iter.node->leaf()) { - iter.node = nullptr; + assert(iter.node_ != nullptr); + while (iter.position_ == iter.node_->finish()) { + iter.position_ = iter.node_->position(); + iter.node_ = iter.node_->parent(); + if (iter.node_->is_leaf()) { + iter.node_ = nullptr; break; } } + iter.update_generation(); return iter; } @@ -2449,38 +2631,39 @@ template template inline auto btree

::internal_emplace(iterator iter, Args &&... args) -> iterator { - if (!iter.node->leaf()) { + if (iter.node_->is_internal()) { // We can't insert on an internal node. Instead, we'll insert after the // previous value which is guaranteed to be on a leaf node. --iter; - ++iter.position; + ++iter.position_; } - const int max_count = iter.node->max_count(); + const field_type max_count = iter.node_->max_count(); allocator_type *alloc = mutable_allocator(); - if (iter.node->count() == max_count) { + if (iter.node_->count() == max_count) { // Make room in the leaf for the new item. - if (max_count < kNodeValues) { + if (max_count < kNodeSlots) { // Insertion into the root where the root is smaller than the full node // size. Simply grow the size of the root node. - assert(iter.node == root()); - iter.node = - new_leaf_root_node((std::min)(kNodeValues, 2 * max_count)); + assert(iter.node_ == root()); + iter.node_ = + new_leaf_root_node((std::min)(kNodeSlots, 2 * max_count)); // Transfer the values from the old root to the new root. node_type *old_root = root(); - node_type *new_root = iter.node; - for (int i = old_root->start(), f = old_root->finish(); i < f; ++i) { - new_root->transfer(i, i, old_root, alloc); - } + node_type *new_root = iter.node_; + new_root->transfer_n(old_root->count(), new_root->start(), + old_root->start(), old_root, alloc); new_root->set_finish(old_root->finish()); old_root->set_finish(old_root->start()); - delete_leaf_node(old_root); + new_root->set_generation(old_root->generation()); + node_type::clear_and_delete(old_root, alloc); mutable_root() = rightmost_ = new_root; } else { rebalance_or_split(&iter); } } - iter.node->emplace_value(iter.position, alloc, std::forward(args)...); + iter.node_->emplace_value(iter.position_, alloc, std::forward(args)...); ++size_; + iter.update_generation(); return iter; } @@ -2488,61 +2671,51 @@ template template inline auto btree
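
internal_emplace above grows an undersized leaf root by allocating a replacement with twice the capacity, capped at kNodeSlots, and transferring the existing values across. A toy sketch of that capacity progression; the starting capacity of 1 and the cap of 62 are assumptions made only for illustration:

#include <algorithm>
#include <cstdio>

int main() {
  const int kCap = 62;  // stand-in for kNodeSlots
  int capacity = 1;     // a freshly created leaf root starts small
  while (capacity < kCap) {
    std::printf("%d -> ", capacity);
    capacity = std::min(kCap, 2 * capacity);  // same doubling rule as above
  }
  std::printf("%d\n", capacity);  // 1 -> 2 -> 4 -> ... -> 62
  return 0;
}
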

::internal_locate(const K &key) const -> SearchResult { - return internal_locate_impl(key, is_key_compare_to()); -} - -template -template -inline auto btree

::internal_locate_impl( - const K &key, std::false_type /* IsCompareTo */) const - -> SearchResult { iterator iter(const_cast(root())); for (;;) { - iter.position = iter.node->lower_bound(key, key_comp()).value; - // NOTE: we don't need to walk all the way down the tree if the keys are - // equal, but determining equality would require doing an extra comparison - // on each node on the way down, and we will need to go all the way to the - // leaf node in the expected case. - if (iter.node->leaf()) { - break; - } - iter.node = iter.node->child(iter.position); - } - return {iter}; -} - -template -template -inline auto btree

::internal_locate_impl( - const K &key, std::true_type /* IsCompareTo */) const - -> SearchResult { - iterator iter(const_cast(root())); - for (;;) { - SearchResult res = iter.node->lower_bound(key, key_comp()); - iter.position = res.value; - if (res.match == MatchKind::kEq) { + SearchResult res = + iter.node_->lower_bound(key, key_comp()); + iter.position_ = res.value; + if (res.IsEq()) { return {iter, MatchKind::kEq}; } - if (iter.node->leaf()) { + // Note: in the non-key-compare-to case, we don't need to walk all the way + // down the tree if the keys are equal, but determining equality would + // require doing an extra comparison on each node on the way down, and we + // will need to go all the way to the leaf node in the expected case. + if (iter.node_->is_leaf()) { break; } - iter.node = iter.node->child(iter.position); + iter.node_ = iter.node_->child(iter.position_); } + // Note: in the non-key-compare-to case, the key may actually be equivalent + // here (and the MatchKind::kNe is ignored). return {iter, MatchKind::kNe}; } template template -auto btree

::internal_lower_bound(const K &key) const -> iterator { +auto btree

::internal_lower_bound(const K &key) const + -> SearchResult { + if (!params_type::template can_have_multiple_equivalent_keys()) { + SearchResult ret = internal_locate(key); + ret.value = internal_last(ret.value); + return ret; + } iterator iter(const_cast(root())); + SearchResult res; + bool seen_eq = false; for (;;) { - iter.position = iter.node->lower_bound(key, key_comp()).value; - if (iter.node->leaf()) { + res = iter.node_->lower_bound(key, key_comp()); + iter.position_ = res.value; + if (iter.node_->is_leaf()) { break; } - iter.node = iter.node->child(iter.position); + seen_eq = seen_eq || res.IsEq(); + iter.node_ = iter.node_->child(iter.position_); } - return internal_last(iter); + if (res.IsEq()) return {iter, MatchKind::kEq}; + return {internal_last(iter), seen_eq ? MatchKind::kEq : MatchKind::kNe}; } template @@ -2550,11 +2723,11 @@ template auto btree

::internal_upper_bound(const K &key) const -> iterator { iterator iter(const_cast(root())); for (;;) { - iter.position = iter.node->upper_bound(key, key_comp()); - if (iter.node->leaf()) { + iter.position_ = iter.node_->upper_bound(key, key_comp()); + if (iter.node_->is_leaf()) { break; } - iter.node = iter.node->child(iter.position); + iter.node_ = iter.node_->child(iter.position_); } return internal_last(iter); } @@ -2562,32 +2735,20 @@ auto btree
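
The lower_bound/upper_bound pair above delimits the run of keys equivalent to the probe, and the SearchResult-returning lower_bound additionally reports whether an exact match was seen so callers such as extract(key) (via lower_bound_equal) can avoid a second comparison. At the container level the observable behaviour is the usual equal_range semantics; a brief sketch:

#include <cassert>
#include "absl/container/btree_multiset.h"

int main() {
  absl::btree_multiset<int> ms = {1, 2, 2, 2, 3};

  // count() and erase(key) operate on the [lower_bound, upper_bound) run
  // of keys equivalent to the argument.
  assert(ms.count(2) == 3);
  assert(ms.erase(2) == 3);  // removes the whole run of 2s
  assert(ms.size() == 2);
  return 0;
}
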

::internal_upper_bound(const K &key) const -> iterator { template template auto btree

::internal_find(const K &key) const -> iterator { - auto res = internal_locate(key); + SearchResult res = internal_locate(key); if (res.HasMatch()) { if (res.IsEq()) { return res.value; } } else { const iterator iter = internal_last(res.value); - if (iter.node != nullptr && !compare_keys(key, iter.key())) { + if (iter.node_ != nullptr && !compare_keys(key, iter.key())) { return iter; } } return {nullptr, 0}; } -template -void btree

::internal_clear(node_type *node) { - if (!node->leaf()) { - for (int i = node->start(); i <= node->finish(); ++i) { - internal_clear(node->child(i)); - } - delete_internal_node(node); - } else { - delete_leaf_node(node); - } -} - template int btree

::internal_verify(const node_type *node, const key_type *lo, const key_type *hi) const { @@ -2603,7 +2764,7 @@ int btree

::internal_verify(const node_type *node, const key_type *lo, assert(!compare_keys(node->key(i), node->key(i - 1))); } int count = node->count(); - if (!node->leaf()) { + if (node->is_internal()) { for (int i = node->start(); i <= node->finish(); ++i) { assert(node->child(i) != nullptr); assert(node->child(i)->parent() == node); @@ -2616,6 +2777,50 @@ int btree

::internal_verify(const node_type *node, const key_type *lo, return count; } +struct btree_access { + template + static auto erase_if(BtreeContainer &container, Pred pred) + -> typename BtreeContainer::size_type { + const auto initial_size = container.size(); + auto &tree = container.tree_; + auto *alloc = tree.mutable_allocator(); + for (auto it = container.begin(); it != container.end();) { + if (!pred(*it)) { + ++it; + continue; + } + auto *node = it.node_; + if (node->is_internal()) { + // Handle internal nodes normally. + it = container.erase(it); + continue; + } + // If this is a leaf node, then we do all the erases from this node + // at once before doing rebalancing. + + // The current position to transfer slots to. + int to_pos = it.position_; + node->value_destroy(it.position_, alloc); + while (++it.position_ < node->finish()) { + it.update_generation(); + if (pred(*it)) { + node->value_destroy(it.position_, alloc); + } else { + node->transfer(node->slot(to_pos++), node->slot(it.position_), alloc); + } + } + const int num_deleted = node->finish() - to_pos; + tree.size_ -= num_deleted; + node->set_finish(to_pos); + it.position_ = to_pos; + it = tree.rebalance_after_delete(it); + } + return initial_size - container.size(); + } +}; + +#undef ABSL_BTREE_ENABLE_GENERATIONS + } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl diff --git a/TMessagesProj/jni/voip/webrtc/absl/container/internal/btree_container.h b/TMessagesProj/jni/voip/webrtc/absl/container/internal/btree_container.h index 734c90ef3..cc2e1793a 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/container/internal/btree_container.h +++ b/TMessagesProj/jni/voip/webrtc/absl/container/internal/btree_container.h @@ -20,9 +20,11 @@ #include #include +#include "absl/base/attributes.h" #include "absl/base/internal/throw_delegate.h" #include "absl/container/internal/btree.h" // IWYU pragma: export #include "absl/container/internal/common.h" +#include "absl/memory/memory.h" #include "absl/meta/type_traits.h" namespace absl { @@ -42,15 +44,15 @@ class btree_container { // transparent case. 
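
btree_access::erase_if above is what the erase_if free functions for the B-tree containers dispatch to: on a leaf it destroys every matching slot in a single pass and rebalances once, instead of paying a full erase per element. A minimal usage sketch; the map contents are illustrative:

#include <cassert>
#include <string>
#include "absl/container/btree_map.h"

int main() {
  absl::btree_map<int, std::string> m = {
      {1, "one"}, {2, "two"}, {3, "three"}, {4, "four"}};

  // Remove every entry with an even key.  The predicate receives a
  // reference to the map's value_type (a pair of key and mapped value).
  absl::erase_if(m, [](const std::pair<const int, std::string>& kv) {
    return kv.first % 2 == 0;
  });

  assert(m.size() == 2);
  assert(m.contains(1) && m.contains(3));
  return 0;
}
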
template using key_arg = - typename KeyArg::value>:: - template type; + typename KeyArg::template type< + K, typename Tree::key_type>; public: using key_type = typename Tree::key_type; using value_type = typename Tree::value_type; using size_type = typename Tree::size_type; using difference_type = typename Tree::difference_type; - using key_compare = typename Tree::key_compare; + using key_compare = typename Tree::original_key_compare; using value_compare = typename Tree::value_compare; using allocator_type = typename Tree::allocator_type; using reference = typename Tree::reference; @@ -68,8 +70,21 @@ class btree_container { explicit btree_container(const key_compare &comp, const allocator_type &alloc = allocator_type()) : tree_(comp, alloc) {} - btree_container(const btree_container &other) = default; - btree_container(btree_container &&other) noexcept = default; + explicit btree_container(const allocator_type &alloc) + : tree_(key_compare(), alloc) {} + + btree_container(const btree_container &other) + : btree_container(other, absl::allocator_traits:: + select_on_container_copy_construction( + other.get_allocator())) {} + btree_container(const btree_container &other, const allocator_type &alloc) + : tree_(other.tree_, alloc) {} + + btree_container(btree_container &&other) noexcept( + std::is_nothrow_move_constructible::value) = default; + btree_container(btree_container &&other, const allocator_type &alloc) + : tree_(std::move(other.tree_), alloc) {} + btree_container &operator=(const btree_container &other) = default; btree_container &operator=(btree_container &&other) noexcept( std::is_nothrow_move_assignable::value) = default; @@ -90,6 +105,11 @@ class btree_container { // Lookup routines. template + size_type count(const key_arg &key) const { + auto equal_range = this->equal_range(key); + return std::distance(equal_range.first, equal_range.second); + } + template iterator find(const key_arg &key) { return tree_.find(key); } @@ -138,6 +158,11 @@ class btree_container { iterator erase(const_iterator first, const_iterator last) { return tree_.erase_range(iterator(first), iterator(last)).second; } + template + size_type erase(const key_arg &key) { + auto equal_range = this->equal_range(key); + return tree_.erase_range(equal_range.first, equal_range.second).first; + } // Extract routines. node_type extract(iterator position) { @@ -151,9 +176,8 @@ class btree_container { return extract(iterator(position)); } - public: // Utility routines. - void clear() { tree_.clear(); } + ABSL_ATTRIBUTE_REINITIALIZES void clear() { tree_.clear(); } void swap(btree_container &other) { tree_.swap(other.tree_); } void verify() const { tree_.verify(); } @@ -191,7 +215,7 @@ class btree_container { allocator_type get_allocator() const { return tree_.get_allocator(); } // The key comparator used by the btree. - key_compare key_comp() const { return tree_.key_comp(); } + key_compare key_comp() const { return key_compare(tree_.key_comp()); } value_compare value_comp() const { return tree_.value_comp(); } // Support absl::Hash. 
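
The key_arg machinery and the equal_range-based count() above enable heterogeneous lookup whenever the comparator is transparent: lookups can take any type comparable with the key without materializing a temporary key object. A short usage sketch with a transparent std::less<>:

#include <cassert>
#include <string>
#include "absl/container/btree_set.h"
#include "absl/strings/string_view.h"

int main() {
  // std::less<> is transparent, so lookups may take absl::string_view
  // (or const char*) without constructing a temporary std::string.
  absl::btree_set<std::string, std::less<>> names = {"ada", "grace", "linus"};

  absl::string_view probe = "grace";
  assert(names.count(probe) == 1);  // count() walks the equal_range() run
  assert(names.find(probe) != names.end());
  assert(names.contains(probe));
  return 0;
}
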
@@ -204,6 +228,7 @@ class btree_container { } protected: + friend struct btree_access; Tree tree_; }; @@ -224,7 +249,7 @@ class btree_set_container : public btree_container { using key_type = typename Tree::key_type; using value_type = typename Tree::value_type; using size_type = typename Tree::size_type; - using key_compare = typename Tree::key_compare; + using key_compare = typename Tree::original_key_compare; using allocator_type = typename Tree::allocator_type; using iterator = typename Tree::iterator; using const_iterator = typename Tree::const_iterator; @@ -235,7 +260,7 @@ class btree_set_container : public btree_container { using super_type::super_type; btree_set_container() {} - // Range constructor. + // Range constructors. template btree_set_container(InputIterator b, InputIterator e, const key_compare &comp = key_compare(), @@ -243,18 +268,19 @@ class btree_set_container : public btree_container { : super_type(comp, alloc) { insert(b, e); } + template + btree_set_container(InputIterator b, InputIterator e, + const allocator_type &alloc) + : btree_set_container(b, e, key_compare(), alloc) {} - // Initializer list constructor. + // Initializer list constructors. btree_set_container(std::initializer_list init, const key_compare &comp = key_compare(), const allocator_type &alloc = allocator_type()) : btree_set_container(init.begin(), init.end(), comp, alloc) {} - - // Lookup routines. - template - size_type count(const key_arg &key) const { - return this->tree_.count_unique(key); - } + btree_set_container(std::initializer_list init, + const allocator_type &alloc) + : btree_set_container(init.begin(), init.end(), alloc) {} // Insertion routines. std::pair insert(const value_type &v) { @@ -268,31 +294,29 @@ class btree_set_container : public btree_container { init_type v(std::forward(args)...); return this->tree_.insert_unique(params_type::key(v), std::move(v)); } - iterator insert(const_iterator position, const value_type &v) { + iterator insert(const_iterator hint, const value_type &v) { return this->tree_ - .insert_hint_unique(iterator(position), params_type::key(v), v) + .insert_hint_unique(iterator(hint), params_type::key(v), v) .first; } - iterator insert(const_iterator position, value_type &&v) { + iterator insert(const_iterator hint, value_type &&v) { return this->tree_ - .insert_hint_unique(iterator(position), params_type::key(v), - std::move(v)) + .insert_hint_unique(iterator(hint), params_type::key(v), std::move(v)) .first; } template - iterator emplace_hint(const_iterator position, Args &&... args) { + iterator emplace_hint(const_iterator hint, Args &&... args) { init_type v(std::forward(args)...); return this->tree_ - .insert_hint_unique(iterator(position), params_type::key(v), - std::move(v)) + .insert_hint_unique(iterator(hint), params_type::key(v), std::move(v)) .first; } template void insert(InputIterator b, InputIterator e) { - this->tree_.insert_iterator_unique(b, e); + this->tree_.insert_iterator_unique(b, e, 0); } void insert(std::initializer_list init) { - this->tree_.insert_iterator_unique(init.begin(), init.end()); + this->tree_.insert_iterator_unique(init.begin(), init.end(), 0); } insert_return_type insert(node_type &&node) { if (!node) return {this->end(), false, node_type()}; @@ -315,18 +339,13 @@ class btree_set_container : public btree_container { return res.first; } - // Deletion routines. - template - size_type erase(const key_arg &key) { - return this->tree_.erase_unique(key); - } - using super_type::erase; - // Node extraction routines. 
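
The constructors added above give the B-tree containers the allocator-extended forms the standard associative containers have, so an allocator can be supplied without also spelling out a comparator. A small sketch using the default std::allocator purely for illustration:

#include <cassert>
#include <memory>
#include "absl/container/btree_set.h"

int main() {
  std::allocator<int> alloc;

  absl::btree_set<int> a(alloc);                      // (alloc)
  absl::btree_set<int> b({1, 2, 3}, alloc);           // (init_list, alloc)
  absl::btree_set<int> c(b.begin(), b.end(), alloc);  // (first, last, alloc)
  absl::btree_set<int> d(b, alloc);                   // (other, alloc) copy

  assert(a.empty());
  assert(b.size() == 3 && c.size() == 3 && d.size() == 3);
  return 0;
}
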
template node_type extract(const key_arg &key) { - auto it = this->find(key); - return it == this->end() ? node_type() : extract(it); + const std::pair lower_and_equal = + this->tree_.lower_bound_equal(key); + return lower_and_equal.second ? extract(lower_and_equal.first) + : node_type(); } using super_type::extract; @@ -344,7 +363,7 @@ class btree_set_container : public btree_container { int> = 0> void merge(btree_container &src) { // NOLINT for (auto src_it = src.begin(); src_it != src.end();) { - if (insert(std::move(*src_it)).second) { + if (insert(std::move(params_type::element(src_it.slot()))).second) { src_it = src.erase(src_it); } else { ++src_it; @@ -371,6 +390,7 @@ template class btree_map_container : public btree_set_container { using super_type = btree_set_container; using params_type = typename Tree::params_type; + friend class BtreeNodePeer; private: template @@ -380,7 +400,7 @@ class btree_map_container : public btree_set_container { using key_type = typename Tree::key_type; using mapped_type = typename params_type::mapped_type; using value_type = typename Tree::value_type; - using key_compare = typename Tree::key_compare; + using key_compare = typename Tree::original_key_compare; using allocator_type = typename Tree::allocator_type; using iterator = typename Tree::iterator; using const_iterator = typename Tree::const_iterator; @@ -392,111 +412,72 @@ class btree_map_container : public btree_set_container { // Insertion routines. // Note: the nullptr template arguments and extra `const M&` overloads allow // for supporting bitfield arguments. - // Note: when we call `std::forward(obj)` twice, it's safe because - // insert_unique/insert_hint_unique are guaranteed to not consume `obj` when - // `ret.second` is false. - template - std::pair insert_or_assign(const key_type &k, const M &obj) { - const std::pair ret = this->tree_.insert_unique(k, k, obj); - if (!ret.second) ret.first->second = obj; - return ret; + template + std::pair insert_or_assign(const key_arg &k, + const M &obj) { + return insert_or_assign_impl(k, obj); } - template - std::pair insert_or_assign(key_type &&k, const M &obj) { - const std::pair ret = - this->tree_.insert_unique(k, std::move(k), obj); - if (!ret.second) ret.first->second = obj; - return ret; + template + std::pair insert_or_assign(key_arg &&k, const M &obj) { + return insert_or_assign_impl(std::forward(k), obj); } - template - std::pair insert_or_assign(const key_type &k, M &&obj) { - const std::pair ret = - this->tree_.insert_unique(k, k, std::forward(obj)); - if (!ret.second) ret.first->second = std::forward(obj); - return ret; + template + std::pair insert_or_assign(const key_arg &k, M &&obj) { + return insert_or_assign_impl(k, std::forward(obj)); } - template - std::pair insert_or_assign(key_type &&k, M &&obj) { - const std::pair ret = - this->tree_.insert_unique(k, std::move(k), std::forward(obj)); - if (!ret.second) ret.first->second = std::forward(obj); - return ret; + template + std::pair insert_or_assign(key_arg &&k, M &&obj) { + return insert_or_assign_impl(std::forward(k), std::forward(obj)); } - template - iterator insert_or_assign(const_iterator position, const key_type &k, + template + iterator insert_or_assign(const_iterator hint, const key_arg &k, const M &obj) { - const std::pair ret = - this->tree_.insert_hint_unique(iterator(position), k, k, obj); - if (!ret.second) ret.first->second = obj; - return ret.first; + return insert_or_assign_hint_impl(hint, k, obj); } - template - iterator insert_or_assign(const_iterator position, 
key_type &&k, - const M &obj) { - const std::pair ret = this->tree_.insert_hint_unique( - iterator(position), k, std::move(k), obj); - if (!ret.second) ret.first->second = obj; - return ret.first; + template + iterator insert_or_assign(const_iterator hint, key_arg &&k, const M &obj) { + return insert_or_assign_hint_impl(hint, std::forward(k), obj); } - template - iterator insert_or_assign(const_iterator position, const key_type &k, - M &&obj) { - const std::pair ret = this->tree_.insert_hint_unique( - iterator(position), k, k, std::forward(obj)); - if (!ret.second) ret.first->second = std::forward(obj); - return ret.first; + template + iterator insert_or_assign(const_iterator hint, const key_arg &k, M &&obj) { + return insert_or_assign_hint_impl(hint, k, std::forward(obj)); } - template - iterator insert_or_assign(const_iterator position, key_type &&k, M &&obj) { - const std::pair ret = this->tree_.insert_hint_unique( - iterator(position), k, std::move(k), std::forward(obj)); - if (!ret.second) ret.first->second = std::forward(obj); - return ret.first; + template + iterator insert_or_assign(const_iterator hint, key_arg &&k, M &&obj) { + return insert_or_assign_hint_impl(hint, std::forward(k), + std::forward(obj)); } - template - std::pair try_emplace(const key_type &k, Args &&... args) { - return this->tree_.insert_unique( - k, std::piecewise_construct, std::forward_as_tuple(k), - std::forward_as_tuple(std::forward(args)...)); + + template ::value, int> = 0> + std::pair try_emplace(const key_arg &k, Args &&... args) { + return try_emplace_impl(k, std::forward(args)...); } - template - std::pair try_emplace(key_type &&k, Args &&... args) { - // Note: `key_ref` exists to avoid a ClangTidy warning about moving from `k` - // and then using `k` unsequenced. This is safe because the move is into a - // forwarding reference and insert_unique guarantees that `key` is never - // referenced after consuming `args`. - const key_type &key_ref = k; - return this->tree_.insert_unique( - key_ref, std::piecewise_construct, std::forward_as_tuple(std::move(k)), - std::forward_as_tuple(std::forward(args)...)); + template ::value, int> = 0> + std::pair try_emplace(key_arg &&k, Args &&... args) { + return try_emplace_impl(std::forward(k), std::forward(args)...); } - template - iterator try_emplace(const_iterator hint, const key_type &k, + template + iterator try_emplace(const_iterator hint, const key_arg &k, Args &&... args) { - return this->tree_ - .insert_hint_unique(iterator(hint), k, std::piecewise_construct, - std::forward_as_tuple(k), - std::forward_as_tuple(std::forward(args)...)) - .first; + return try_emplace_hint_impl(hint, k, std::forward(args)...); } - template - iterator try_emplace(const_iterator hint, key_type &&k, Args &&... args) { - // Note: `key_ref` exists to avoid a ClangTidy warning about moving from `k` - // and then using `k` unsequenced. This is safe because the move is into a - // forwarding reference and insert_hint_unique guarantees that `key` is - // never referenced after consuming `args`. - const key_type &key_ref = k; - return this->tree_ - .insert_hint_unique(iterator(hint), key_ref, std::piecewise_construct, - std::forward_as_tuple(std::move(k)), - std::forward_as_tuple(std::forward(args)...)) - .first; + template + iterator try_emplace(const_iterator hint, key_arg &&k, Args &&... 
args) { + return try_emplace_hint_impl(hint, std::forward(k), + std::forward(args)...); } - mapped_type &operator[](const key_type &k) { + + template + mapped_type &operator[](const key_arg &k) { return try_emplace(k).first->second; } - mapped_type &operator[](key_type &&k) { - return try_emplace(std::move(k)).first->second; + template + mapped_type &operator[](key_arg &&k) { + return try_emplace(std::forward(k)).first->second; } template @@ -513,6 +494,40 @@ class btree_map_container : public btree_set_container { base_internal::ThrowStdOutOfRange("absl::btree_map::at"); return it->second; } + + private: + // Note: when we call `std::forward(obj)` twice, it's safe because + // insert_unique/insert_hint_unique are guaranteed to not consume `obj` when + // `ret.second` is false. + template + std::pair insert_or_assign_impl(K &&k, M &&obj) { + const std::pair ret = + this->tree_.insert_unique(k, std::forward(k), std::forward(obj)); + if (!ret.second) ret.first->second = std::forward(obj); + return ret; + } + template + iterator insert_or_assign_hint_impl(const_iterator hint, K &&k, M &&obj) { + const std::pair ret = this->tree_.insert_hint_unique( + iterator(hint), k, std::forward(k), std::forward(obj)); + if (!ret.second) ret.first->second = std::forward(obj); + return ret.first; + } + + template + std::pair try_emplace_impl(K &&k, Args &&... args) { + return this->tree_.insert_unique( + k, std::piecewise_construct, std::forward_as_tuple(std::forward(k)), + std::forward_as_tuple(std::forward(args)...)); + } + template + iterator try_emplace_hint_impl(const_iterator hint, K &&k, Args &&... args) { + return this->tree_ + .insert_hint_unique(iterator(hint), k, std::piecewise_construct, + std::forward_as_tuple(std::forward(k)), + std::forward_as_tuple(std::forward(args)...)) + .first; + } }; // A common base class for btree_multiset and btree_multimap. @@ -522,6 +537,7 @@ class btree_multiset_container : public btree_container { using params_type = typename Tree::params_type; using init_type = typename params_type::init_type; using is_key_compare_to = typename params_type::is_key_compare_to; + friend class BtreeNodePeer; template using key_arg = typename super_type::template key_arg; @@ -530,7 +546,7 @@ class btree_multiset_container : public btree_container { using key_type = typename Tree::key_type; using value_type = typename Tree::value_type; using size_type = typename Tree::size_type; - using key_compare = typename Tree::key_compare; + using key_compare = typename Tree::original_key_compare; using allocator_type = typename Tree::allocator_type; using iterator = typename Tree::iterator; using const_iterator = typename Tree::const_iterator; @@ -540,7 +556,7 @@ class btree_multiset_container : public btree_container { using super_type::super_type; btree_multiset_container() {} - // Range constructor. + // Range constructors. template btree_multiset_container(InputIterator b, InputIterator e, const key_compare &comp = key_compare(), @@ -548,29 +564,30 @@ class btree_multiset_container : public btree_container { : super_type(comp, alloc) { insert(b, e); } + template + btree_multiset_container(InputIterator b, InputIterator e, + const allocator_type &alloc) + : btree_multiset_container(b, e, key_compare(), alloc) {} - // Initializer list constructor. + // Initializer list constructors. 
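
The insert_or_assign and try_emplace overloads above now funnel through shared _impl helpers and accept heterogeneous keys, but their observable semantics match the familiar map API. A brief usage sketch:

#include <cassert>
#include <string>
#include "absl/container/btree_map.h"

int main() {
  absl::btree_map<std::string, int> votes;

  // try_emplace constructs the mapped value only if the key is absent,
  // so the second call leaves the existing entry untouched.
  votes.try_emplace("alice", 1);
  votes.try_emplace("alice", 99);
  assert(votes.at("alice") == 1);

  // insert_or_assign overwrites the mapped value when the key exists.
  votes.insert_or_assign("alice", 7);
  votes.insert_or_assign("bob", 2);
  assert(votes.at("alice") == 7);
  assert(votes.size() == 2);
  return 0;
}
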
btree_multiset_container(std::initializer_list init, const key_compare &comp = key_compare(), const allocator_type &alloc = allocator_type()) : btree_multiset_container(init.begin(), init.end(), comp, alloc) {} - - // Lookup routines. - template - size_type count(const key_arg &key) const { - return this->tree_.count_multi(key); - } + btree_multiset_container(std::initializer_list init, + const allocator_type &alloc) + : btree_multiset_container(init.begin(), init.end(), alloc) {} // Insertion routines. iterator insert(const value_type &v) { return this->tree_.insert_multi(v); } iterator insert(value_type &&v) { return this->tree_.insert_multi(std::move(v)); } - iterator insert(const_iterator position, const value_type &v) { - return this->tree_.insert_hint_multi(iterator(position), v); + iterator insert(const_iterator hint, const value_type &v) { + return this->tree_.insert_hint_multi(iterator(hint), v); } - iterator insert(const_iterator position, value_type &&v) { - return this->tree_.insert_hint_multi(iterator(position), std::move(v)); + iterator insert(const_iterator hint, value_type &&v) { + return this->tree_.insert_hint_multi(iterator(hint), std::move(v)); } template void insert(InputIterator b, InputIterator e) { @@ -584,9 +601,9 @@ class btree_multiset_container : public btree_container { return this->tree_.insert_multi(init_type(std::forward(args)...)); } template - iterator emplace_hint(const_iterator position, Args &&... args) { + iterator emplace_hint(const_iterator hint, Args &&... args) { return this->tree_.insert_hint_multi( - iterator(position), init_type(std::forward(args)...)); + iterator(hint), init_type(std::forward(args)...)); } iterator insert(node_type &&node) { if (!node) return this->end(); @@ -605,18 +622,13 @@ class btree_multiset_container : public btree_container { return res; } - // Deletion routines. - template - size_type erase(const key_arg &key) { - return this->tree_.erase_multi(key); - } - using super_type::erase; - // Node extraction routines. template node_type extract(const key_arg &key) { - auto it = this->find(key); - return it == this->end() ? node_type() : extract(it); + const std::pair lower_and_equal = + this->tree_.lower_bound_equal(key); + return lower_and_equal.second ? 
extract(lower_and_equal.first) + : node_type(); } using super_type::extract; @@ -632,8 +644,9 @@ class btree_multiset_container : public btree_container { typename T::params_type::is_map_container>>::value, int> = 0> void merge(btree_container &src) { // NOLINT - insert(std::make_move_iterator(src.begin()), - std::make_move_iterator(src.end())); + for (auto src_it = src.begin(), end = src.end(); src_it != end; ++src_it) { + insert(std::move(params_type::element(src_it.slot()))); + } src.clear(); } @@ -656,6 +669,7 @@ template class btree_multimap_container : public btree_multiset_container { using super_type = btree_multiset_container; using params_type = typename Tree::params_type; + friend class BtreeNodePeer; public: using mapped_type = typename params_type::mapped_type; diff --git a/TMessagesProj/jni/voip/webrtc/absl/container/internal/common.h b/TMessagesProj/jni/voip/webrtc/absl/container/internal/common.h index 8990f2947..030e9d4ab 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/container/internal/common.h +++ b/TMessagesProj/jni/voip/webrtc/absl/container/internal/common.h @@ -146,8 +146,11 @@ class node_handle decltype(PolicyTraits::key(std::declval())) { - return PolicyTraits::key(this->slot()); + // When C++17 is available, we can use std::launder to provide mutable + // access to the key. Otherwise, we provide const access. + auto key() const + -> decltype(PolicyTraits::mutable_key(std::declval())) { + return PolicyTraits::mutable_key(this->slot()); } mapped_type& mapped() const { diff --git a/TMessagesProj/jni/voip/webrtc/absl/container/internal/compressed_tuple.h b/TMessagesProj/jni/voip/webrtc/absl/container/internal/compressed_tuple.h index 02bfd03f6..5ebe16494 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/container/internal/compressed_tuple.h +++ b/TMessagesProj/jni/voip/webrtc/absl/container/internal/compressed_tuple.h @@ -257,7 +257,7 @@ class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple template ElemT& get() & { - return internal_compressed_tuple::Storage, I>::get(); + return StorageT::get(); } template diff --git a/TMessagesProj/jni/voip/webrtc/absl/container/internal/compressed_tuple_test.cc b/TMessagesProj/jni/voip/webrtc/absl/container/internal/compressed_tuple_test.cc new file mode 100644 index 000000000..74111f975 --- /dev/null +++ b/TMessagesProj/jni/voip/webrtc/absl/container/internal/compressed_tuple_test.cc @@ -0,0 +1,419 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/container/internal/compressed_tuple.h" + +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/container/internal/test_instance_tracker.h" +#include "absl/memory/memory.h" +#include "absl/types/any.h" +#include "absl/types/optional.h" +#include "absl/utility/utility.h" + +// These are declared at global scope purely so that error messages +// are smaller and easier to understand. 
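
The node_handle::key() change above returns a mutable reference when std::launder is available, which is what makes the extract-rename-reinsert idiom work without copying the element. A sketch assuming a C++17 build; the map contents are illustrative:

#include <cassert>
#include <string>
#include <utility>
#include "absl/container/btree_map.h"

int main() {
  absl::btree_map<std::string, int> m = {{"old_name", 42}};

  // extract() detaches the element as a node handle; with a mutable key()
  // the key can be edited in place and the node reinserted, so the mapped
  // value is never copied or moved.
  auto node = m.extract("old_name");
  assert(!node.empty());
  node.key() = "new_name";
  m.insert(std::move(node));

  assert(m.contains("new_name") && m.at("new_name") == 42);
  return 0;
}
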
+enum class CallType { kConstRef, kConstMove }; + +template +struct Empty { + constexpr CallType value() const& { return CallType::kConstRef; } + constexpr CallType value() const&& { return CallType::kConstMove; } +}; + +template +struct NotEmpty { + T value; +}; + +template +struct TwoValues { + T value1; + U value2; +}; + + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +namespace { + +using absl::test_internal::CopyableMovableInstance; +using absl::test_internal::InstanceTracker; + +TEST(CompressedTupleTest, Sizeof) { + EXPECT_EQ(sizeof(int), sizeof(CompressedTuple)); + EXPECT_EQ(sizeof(int), sizeof(CompressedTuple>)); + EXPECT_EQ(sizeof(int), sizeof(CompressedTuple, Empty<1>>)); + EXPECT_EQ(sizeof(int), + sizeof(CompressedTuple, Empty<1>, Empty<2>>)); + + EXPECT_EQ(sizeof(TwoValues), + sizeof(CompressedTuple>)); + EXPECT_EQ(sizeof(TwoValues), + sizeof(CompressedTuple, NotEmpty>)); + EXPECT_EQ(sizeof(TwoValues), + sizeof(CompressedTuple, NotEmpty, Empty<1>>)); +} + +TEST(CompressedTupleTest, OneMoveOnRValueConstructionTemp) { + InstanceTracker tracker; + CompressedTuple x1(CopyableMovableInstance(1)); + EXPECT_EQ(tracker.instances(), 1); + EXPECT_EQ(tracker.copies(), 0); + EXPECT_LE(tracker.moves(), 1); + EXPECT_EQ(x1.get<0>().value(), 1); +} + +TEST(CompressedTupleTest, OneMoveOnRValueConstructionMove) { + InstanceTracker tracker; + + CopyableMovableInstance i1(1); + CompressedTuple x1(std::move(i1)); + EXPECT_EQ(tracker.instances(), 2); + EXPECT_EQ(tracker.copies(), 0); + EXPECT_LE(tracker.moves(), 1); + EXPECT_EQ(x1.get<0>().value(), 1); +} + +TEST(CompressedTupleTest, OneMoveOnRValueConstructionMixedTypes) { + InstanceTracker tracker; + CopyableMovableInstance i1(1); + CopyableMovableInstance i2(2); + Empty<0> empty; + CompressedTuple> + x1(std::move(i1), i2, empty); + EXPECT_EQ(x1.get<0>().value(), 1); + EXPECT_EQ(x1.get<1>().value(), 2); + EXPECT_EQ(tracker.copies(), 0); + EXPECT_EQ(tracker.moves(), 1); +} + +struct IncompleteType; +CompressedTuple> +MakeWithIncomplete(CopyableMovableInstance i1, + IncompleteType& t, // NOLINT + Empty<0> empty) { + return CompressedTuple>{ + std::move(i1), t, empty}; +} + +struct IncompleteType {}; +TEST(CompressedTupleTest, OneMoveOnRValueConstructionWithIncompleteType) { + InstanceTracker tracker; + CopyableMovableInstance i1(1); + Empty<0> empty; + struct DerivedType : IncompleteType {int value = 0;}; + DerivedType fd; + fd.value = 7; + + CompressedTuple> x1 = + MakeWithIncomplete(std::move(i1), fd, empty); + + EXPECT_EQ(x1.get<0>().value(), 1); + EXPECT_EQ(static_cast(x1.get<1>()).value, 7); + + EXPECT_EQ(tracker.copies(), 0); + EXPECT_EQ(tracker.moves(), 2); +} + +TEST(CompressedTupleTest, + OneMoveOnRValueConstructionMixedTypes_BraceInitPoisonPillExpected) { + InstanceTracker tracker; + CopyableMovableInstance i1(1); + CopyableMovableInstance i2(2); + CompressedTuple> + x1(std::move(i1), i2, {}); // NOLINT + EXPECT_EQ(x1.get<0>().value(), 1); + EXPECT_EQ(x1.get<1>().value(), 2); + EXPECT_EQ(tracker.instances(), 3); + // We are forced into the `const Ts&...` constructor (invoking copies) + // because we need it to deduce the type of `{}`. + // std::tuple also has this behavior. + // Note, this test is proof that this is expected behavior, but it is not + // _desired_ behavior. 
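
The Sizeof expectations above only hold because CompressedTuple stores empty types as empty base classes rather than as members. A minimal, library-independent sketch of that empty-base optimization; the type names and the "typically" sizes are illustrative assumptions, not Abseil's implementation:

#include <cstdio>
#include <type_traits>

struct EmptyCompare {
  bool operator()(int a, int b) const { return a < b; }
};

// Holding the empty comparator as a member costs at least one padded byte.
template <typename T, typename Compare>
struct PairAsMembers {
  T value;
  Compare comp;
};

// Inheriting from the empty comparator costs no storage at all.
template <typename T, typename Compare>
struct PairWithEbo : private Compare {
  T value;
};

int main() {
  static_assert(std::is_empty<EmptyCompare>::value, "comparator must be empty");
  std::printf("as members: %zu bytes\n",
              sizeof(PairAsMembers<int, EmptyCompare>));  // typically 8
  std::printf("with EBO:   %zu bytes\n",
              sizeof(PairWithEbo<int, EmptyCompare>));    // typically 4
  return 0;
}
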
+ EXPECT_EQ(tracker.copies(), 1); + EXPECT_EQ(tracker.moves(), 0); +} + +TEST(CompressedTupleTest, OneCopyOnLValueConstruction) { + InstanceTracker tracker; + CopyableMovableInstance i1(1); + + CompressedTuple x1(i1); + EXPECT_EQ(tracker.copies(), 1); + EXPECT_EQ(tracker.moves(), 0); + + tracker.ResetCopiesMovesSwaps(); + + CopyableMovableInstance i2(2); + const CopyableMovableInstance& i2_ref = i2; + CompressedTuple x2(i2_ref); + EXPECT_EQ(tracker.copies(), 1); + EXPECT_EQ(tracker.moves(), 0); +} + +TEST(CompressedTupleTest, OneMoveOnRValueAccess) { + InstanceTracker tracker; + CopyableMovableInstance i1(1); + CompressedTuple x(std::move(i1)); + tracker.ResetCopiesMovesSwaps(); + + CopyableMovableInstance i2 = std::move(x).get<0>(); + EXPECT_EQ(tracker.copies(), 0); + EXPECT_EQ(tracker.moves(), 1); +} + +TEST(CompressedTupleTest, OneCopyOnLValueAccess) { + InstanceTracker tracker; + + CompressedTuple x(CopyableMovableInstance(0)); + EXPECT_EQ(tracker.copies(), 0); + EXPECT_EQ(tracker.moves(), 1); + + CopyableMovableInstance t = x.get<0>(); + EXPECT_EQ(tracker.copies(), 1); + EXPECT_EQ(tracker.moves(), 1); +} + +TEST(CompressedTupleTest, ZeroCopyOnRefAccess) { + InstanceTracker tracker; + + CompressedTuple x(CopyableMovableInstance(0)); + EXPECT_EQ(tracker.copies(), 0); + EXPECT_EQ(tracker.moves(), 1); + + CopyableMovableInstance& t1 = x.get<0>(); + const CopyableMovableInstance& t2 = x.get<0>(); + EXPECT_EQ(tracker.copies(), 0); + EXPECT_EQ(tracker.moves(), 1); + EXPECT_EQ(t1.value(), 0); + EXPECT_EQ(t2.value(), 0); +} + +TEST(CompressedTupleTest, Access) { + struct S { + std::string x; + }; + CompressedTuple, S> x(7, {}, S{"ABC"}); + EXPECT_EQ(sizeof(x), sizeof(TwoValues)); + EXPECT_EQ(7, x.get<0>()); + EXPECT_EQ("ABC", x.get<2>().x); +} + +TEST(CompressedTupleTest, NonClasses) { + CompressedTuple x(7, "ABC"); + EXPECT_EQ(7, x.get<0>()); + EXPECT_STREQ("ABC", x.get<1>()); +} + +TEST(CompressedTupleTest, MixClassAndNonClass) { + CompressedTuple, NotEmpty> x(7, "ABC", {}, + {1.25}); + struct Mock { + int v; + const char* p; + double d; + }; + EXPECT_EQ(sizeof(x), sizeof(Mock)); + EXPECT_EQ(7, x.get<0>()); + EXPECT_STREQ("ABC", x.get<1>()); + EXPECT_EQ(1.25, x.get<3>().value); +} + +TEST(CompressedTupleTest, Nested) { + CompressedTuple, + CompressedTuple>> + x(1, CompressedTuple(2), + CompressedTuple>(3, CompressedTuple(4))); + EXPECT_EQ(1, x.get<0>()); + EXPECT_EQ(2, x.get<1>().get<0>()); + EXPECT_EQ(3, x.get<2>().get<0>()); + EXPECT_EQ(4, x.get<2>().get<1>().get<0>()); + + CompressedTuple, Empty<0>, + CompressedTuple, CompressedTuple>>> + y; + std::set*> empties{&y.get<0>(), &y.get<1>(), &y.get<2>().get<0>(), + &y.get<2>().get<1>().get<0>()}; +#ifdef _MSC_VER + // MSVC has a bug where many instances of the same base class are layed out in + // the same address when using __declspec(empty_bases). + // This will be fixed in a future version of MSVC. + int expected = 1; +#else + int expected = 4; +#endif + EXPECT_EQ(expected, sizeof(y)); + EXPECT_EQ(expected, empties.size()); + EXPECT_EQ(sizeof(y), sizeof(Empty<0>) * empties.size()); + + EXPECT_EQ(4 * sizeof(char), + sizeof(CompressedTuple, + CompressedTuple>)); + EXPECT_TRUE((std::is_empty, Empty<1>>>::value)); + + // Make sure everything still works when things are nested. 
+ struct CT_Empty : CompressedTuple> {}; + CompressedTuple, CT_Empty> nested_empty; + auto contained = nested_empty.get<0>(); + auto nested = nested_empty.get<1>().get<0>(); + EXPECT_TRUE((std::is_same::value)); +} + +TEST(CompressedTupleTest, Reference) { + int i = 7; + std::string s = "Very long string that goes in the heap"; + CompressedTuple x(i, i, s, s); + + // Sanity check. We should have not moved from `s` + EXPECT_EQ(s, "Very long string that goes in the heap"); + + EXPECT_EQ(x.get<0>(), x.get<1>()); + EXPECT_NE(&x.get<0>(), &x.get<1>()); + EXPECT_EQ(&x.get<1>(), &i); + + EXPECT_EQ(x.get<2>(), x.get<3>()); + EXPECT_NE(&x.get<2>(), &x.get<3>()); + EXPECT_EQ(&x.get<3>(), &s); +} + +TEST(CompressedTupleTest, NoElements) { + CompressedTuple<> x; + static_cast(x); // Silence -Wunused-variable. + EXPECT_TRUE(std::is_empty>::value); +} + +TEST(CompressedTupleTest, MoveOnlyElements) { + CompressedTuple> str_tup( + absl::make_unique("str")); + + CompressedTuple>, + std::unique_ptr> + x(std::move(str_tup), absl::make_unique(5)); + + EXPECT_EQ(*x.get<0>().get<0>(), "str"); + EXPECT_EQ(*x.get<1>(), 5); + + std::unique_ptr x0 = std::move(x.get<0>()).get<0>(); + std::unique_ptr x1 = std::move(x).get<1>(); + + EXPECT_EQ(*x0, "str"); + EXPECT_EQ(*x1, 5); +} + +TEST(CompressedTupleTest, MoveConstructionMoveOnlyElements) { + CompressedTuple> base( + absl::make_unique("str")); + EXPECT_EQ(*base.get<0>(), "str"); + + CompressedTuple> copy(std::move(base)); + EXPECT_EQ(*copy.get<0>(), "str"); +} + +TEST(CompressedTupleTest, AnyElements) { + any a(std::string("str")); + CompressedTuple x(any(5), a); + EXPECT_EQ(absl::any_cast(x.get<0>()), 5); + EXPECT_EQ(absl::any_cast(x.get<1>()), "str"); + + a = 0.5f; + EXPECT_EQ(absl::any_cast(x.get<1>()), 0.5); +} + +TEST(CompressedTupleTest, Constexpr) { + struct NonTrivialStruct { + constexpr NonTrivialStruct() = default; + constexpr int value() const { return v; } + int v = 5; + }; + struct TrivialStruct { + TrivialStruct() = default; + constexpr int value() const { return v; } + int v; + }; + constexpr CompressedTuple, Empty<0>> x( + 7, 1.25, CompressedTuple(5), {}); + constexpr int x0 = x.get<0>(); + constexpr double x1 = x.get<1>(); + constexpr int x2 = x.get<2>().get<0>(); + constexpr CallType x3 = x.get<3>().value(); + + EXPECT_EQ(x0, 7); + EXPECT_EQ(x1, 1.25); + EXPECT_EQ(x2, 5); + EXPECT_EQ(x3, CallType::kConstRef); + +#if !defined(__GNUC__) || defined(__clang__) || __GNUC__ > 4 + constexpr CompressedTuple, TrivialStruct, int> trivial = {}; + constexpr CallType trivial0 = trivial.get<0>().value(); + constexpr int trivial1 = trivial.get<1>().value(); + constexpr int trivial2 = trivial.get<2>(); + + EXPECT_EQ(trivial0, CallType::kConstRef); + EXPECT_EQ(trivial1, 0); + EXPECT_EQ(trivial2, 0); +#endif + + constexpr CompressedTuple, NonTrivialStruct, absl::optional> + non_trivial = {}; + constexpr CallType non_trivial0 = non_trivial.get<0>().value(); + constexpr int non_trivial1 = non_trivial.get<1>().value(); + constexpr absl::optional non_trivial2 = non_trivial.get<2>(); + + EXPECT_EQ(non_trivial0, CallType::kConstRef); + EXPECT_EQ(non_trivial1, 5); + EXPECT_EQ(non_trivial2, absl::nullopt); + + static constexpr char data[] = "DEF"; + constexpr CompressedTuple z(data); + constexpr const char* z1 = z.get<0>(); + EXPECT_EQ(std::string(z1), std::string(data)); + +#if defined(__clang__) + // An apparent bug in earlier versions of gcc claims these are ambiguous. 
+ constexpr int x2m = absl::move(x.get<2>()).get<0>(); + constexpr CallType x3m = absl::move(x).get<3>().value(); + EXPECT_EQ(x2m, 5); + EXPECT_EQ(x3m, CallType::kConstMove); +#endif +} + +#if defined(__clang__) || defined(__GNUC__) +TEST(CompressedTupleTest, EmptyFinalClass) { + struct S final { + int f() const { return 5; } + }; + CompressedTuple x; + EXPECT_EQ(x.get<0>().f(), 5); +} +#endif + +// TODO(b/214288561): enable this test. +TEST(CompressedTupleTest, DISABLED_NestedEbo) { + struct Empty1 {}; + struct Empty2 {}; + CompressedTuple, int> x; + CompressedTuple y; + // Currently fails with sizeof(x) == 8, sizeof(y) == 4. + EXPECT_EQ(sizeof(x), sizeof(y)); +} + +} // namespace +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/TMessagesProj/jni/voip/webrtc/absl/container/internal/container_memory.h b/TMessagesProj/jni/voip/webrtc/absl/container/internal/container_memory.h index 3487ac189..e67529ecb 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/container/internal/container_memory.h +++ b/TMessagesProj/jni/voip/webrtc/absl/container/internal/container_memory.h @@ -15,25 +15,27 @@ #ifndef ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_ #define ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_ -#ifdef ADDRESS_SANITIZER -#include -#endif - -#ifdef MEMORY_SANITIZER -#include -#endif - #include #include #include +#include #include #include #include +#include "absl/base/config.h" #include "absl/memory/memory.h" #include "absl/meta/type_traits.h" #include "absl/utility/utility.h" +#ifdef ABSL_HAVE_ADDRESS_SANITIZER +#include +#endif + +#ifdef ABSL_HAVE_MEMORY_SANITIZER +#include +#endif + namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { @@ -55,8 +57,11 @@ void* Allocate(Alloc* alloc, size_t n) { using M = AlignedType; using A = typename absl::allocator_traits::template rebind_alloc; using AT = typename absl::allocator_traits::template rebind_traits; - A mem_alloc(*alloc); - void* p = AT::allocate(mem_alloc, (n + sizeof(M) - 1) / sizeof(M)); + // On macOS, "mem_alloc" is a #define with one argument defined in + // rpc/types.h, so we can't name the variable "mem_alloc" and initialize it + // with the "foo(bar)" syntax. + A my_mem_alloc(*alloc); + void* p = AT::allocate(my_mem_alloc, (n + sizeof(M) - 1) / sizeof(M)); assert(reinterpret_cast(p) % Alignment == 0 && "allocator does not respect alignment"); return p; @@ -71,8 +76,11 @@ void Deallocate(Alloc* alloc, void* p, size_t n) { using M = AlignedType; using A = typename absl::allocator_traits::template rebind_alloc; using AT = typename absl::allocator_traits::template rebind_traits; - A mem_alloc(*alloc); - AT::deallocate(mem_alloc, static_cast(p), + // On macOS, "mem_alloc" is a #define with one argument defined in + // rpc/types.h, so we can't name the variable "mem_alloc" and initialize it + // with the "foo(bar)" syntax. + A my_mem_alloc(*alloc); + AT::deallocate(my_mem_alloc, static_cast(p), (n + sizeof(M) - 1) / sizeof(M)); } @@ -209,10 +217,10 @@ DecomposeValue(F&& f, Arg&& arg) { // Helper functions for asan and msan. 
inline void SanitizerPoisonMemoryRegion(const void* m, size_t s) { -#ifdef ADDRESS_SANITIZER +#ifdef ABSL_HAVE_ADDRESS_SANITIZER ASAN_POISON_MEMORY_REGION(m, s); #endif -#ifdef MEMORY_SANITIZER +#ifdef ABSL_HAVE_MEMORY_SANITIZER __msan_poison(m, s); #endif (void)m; @@ -220,10 +228,10 @@ inline void SanitizerPoisonMemoryRegion(const void* m, size_t s) { } inline void SanitizerUnpoisonMemoryRegion(const void* m, size_t s) { -#ifdef ADDRESS_SANITIZER +#ifdef ABSL_HAVE_ADDRESS_SANITIZER ASAN_UNPOISON_MEMORY_REGION(m, s); #endif -#ifdef MEMORY_SANITIZER +#ifdef ABSL_HAVE_MEMORY_SANITIZER __msan_unpoison(m, s); #endif (void)m; @@ -250,8 +258,8 @@ namespace memory_internal { // type, which is non-portable. template struct OffsetOf { - static constexpr size_t kFirst = -1; - static constexpr size_t kSecond = -1; + static constexpr size_t kFirst = static_cast(-1); + static constexpr size_t kSecond = static_cast(-1); }; template @@ -351,6 +359,20 @@ struct map_slot_policy { return slot->value; } + // When C++17 is available, we can use std::launder to provide mutable + // access to the key for use in node handle. +#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606 + static K& mutable_key(slot_type* slot) { + // Still check for kMutableKeys so that we can avoid calling std::launder + // unless necessary because it can interfere with optimizations. + return kMutableKeys::value ? slot->key + : *std::launder(const_cast( + std::addressof(slot->value.first))); + } +#else // !(defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606) + static const K& mutable_key(slot_type* slot) { return key(slot); } +#endif + static const K& key(const slot_type* slot) { return kMutableKeys::value ? slot->key : slot->value.first; } @@ -429,13 +451,6 @@ struct map_slot_policy { std::move(src->value)); } } - - template - static void move(Allocator* alloc, slot_type* first, slot_type* last, - slot_type* result) { - for (slot_type *src = first, *dest = result; src != last; ++src, ++dest) - move(alloc, src, dest); - } }; } // namespace container_internal diff --git a/TMessagesProj/jni/voip/webrtc/absl/container/internal/container_memory_test.cc b/TMessagesProj/jni/voip/webrtc/absl/container/internal/container_memory_test.cc new file mode 100644 index 000000000..fb9c4dded --- /dev/null +++ b/TMessagesProj/jni/voip/webrtc/absl/container/internal/container_memory_test.cc @@ -0,0 +1,257 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "absl/container/internal/container_memory.h" + +#include +#include +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/container/internal/test_instance_tracker.h" +#include "absl/strings/string_view.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +namespace { + +using ::absl::test_internal::CopyableMovableInstance; +using ::absl::test_internal::InstanceTracker; +using ::testing::_; +using ::testing::ElementsAre; +using ::testing::Gt; +using ::testing::Pair; + +TEST(Memory, AlignmentLargerThanBase) { + std::allocator alloc; + void* mem = Allocate<2>(&alloc, 3); + EXPECT_EQ(0, reinterpret_cast(mem) % 2); + memcpy(mem, "abc", 3); + Deallocate<2>(&alloc, mem, 3); +} + +TEST(Memory, AlignmentSmallerThanBase) { + std::allocator alloc; + void* mem = Allocate<2>(&alloc, 3); + EXPECT_EQ(0, reinterpret_cast(mem) % 2); + memcpy(mem, "abc", 3); + Deallocate<2>(&alloc, mem, 3); +} + +std::map& AllocationMap() { + static auto* map = new std::map; + return *map; +} + +template +struct TypeCountingAllocator { + TypeCountingAllocator() = default; + template + TypeCountingAllocator(const TypeCountingAllocator&) {} // NOLINT + + using value_type = T; + + T* allocate(size_t n, const void* = nullptr) { + AllocationMap()[typeid(T)] += n; + return std::allocator().allocate(n); + } + void deallocate(T* p, std::size_t n) { + AllocationMap()[typeid(T)] -= n; + return std::allocator().deallocate(p, n); + } +}; + +TEST(Memory, AllocateDeallocateMatchType) { + TypeCountingAllocator alloc; + void* mem = Allocate<1>(&alloc, 1); + // Verify that it was allocated + EXPECT_THAT(AllocationMap(), ElementsAre(Pair(_, Gt(0)))); + Deallocate<1>(&alloc, mem, 1); + // Verify that the deallocation matched. 
+ EXPECT_THAT(AllocationMap(), ElementsAre(Pair(_, 0))); +} + +class Fixture : public ::testing::Test { + using Alloc = std::allocator; + + public: + Fixture() { ptr_ = std::allocator_traits::allocate(*alloc(), 1); } + ~Fixture() override { + std::allocator_traits::destroy(*alloc(), ptr_); + std::allocator_traits::deallocate(*alloc(), ptr_, 1); + } + std::string* ptr() { return ptr_; } + Alloc* alloc() { return &alloc_; } + + private: + Alloc alloc_; + std::string* ptr_; +}; + +TEST_F(Fixture, ConstructNoArgs) { + ConstructFromTuple(alloc(), ptr(), std::forward_as_tuple()); + EXPECT_EQ(*ptr(), ""); +} + +TEST_F(Fixture, ConstructOneArg) { + ConstructFromTuple(alloc(), ptr(), std::forward_as_tuple("abcde")); + EXPECT_EQ(*ptr(), "abcde"); +} + +TEST_F(Fixture, ConstructTwoArg) { + ConstructFromTuple(alloc(), ptr(), std::forward_as_tuple(5, 'a')); + EXPECT_EQ(*ptr(), "aaaaa"); +} + +TEST(PairArgs, NoArgs) { + EXPECT_THAT(PairArgs(), + Pair(std::forward_as_tuple(), std::forward_as_tuple())); +} + +TEST(PairArgs, TwoArgs) { + EXPECT_EQ( + std::make_pair(std::forward_as_tuple(1), std::forward_as_tuple('A')), + PairArgs(1, 'A')); +} + +TEST(PairArgs, Pair) { + EXPECT_EQ( + std::make_pair(std::forward_as_tuple(1), std::forward_as_tuple('A')), + PairArgs(std::make_pair(1, 'A'))); +} + +TEST(PairArgs, Piecewise) { + EXPECT_EQ( + std::make_pair(std::forward_as_tuple(1), std::forward_as_tuple('A')), + PairArgs(std::piecewise_construct, std::forward_as_tuple(1), + std::forward_as_tuple('A'))); +} + +TEST(WithConstructed, Simple) { + EXPECT_EQ(1, WithConstructed( + std::make_tuple(std::string("a")), + [](absl::string_view str) { return str.size(); })); +} + +template +decltype(DecomposeValue(std::declval(), std::declval())) +DecomposeValueImpl(int, F&& f, Arg&& arg) { + return DecomposeValue(std::forward(f), std::forward(arg)); +} + +template +const char* DecomposeValueImpl(char, F&& f, Arg&& arg) { + return "not decomposable"; +} + +template +decltype(DecomposeValueImpl(0, std::declval(), std::declval())) +TryDecomposeValue(F&& f, Arg&& arg) { + return DecomposeValueImpl(0, std::forward(f), std::forward(arg)); +} + +TEST(DecomposeValue, Decomposable) { + auto f = [](const int& x, int&& y) { // NOLINT + EXPECT_EQ(&x, &y); + EXPECT_EQ(42, x); + return 'A'; + }; + EXPECT_EQ('A', TryDecomposeValue(f, 42)); +} + +TEST(DecomposeValue, NotDecomposable) { + auto f = [](void*) { + ADD_FAILURE() << "Must not be called"; + return 'A'; + }; + EXPECT_STREQ("not decomposable", TryDecomposeValue(f, 42)); +} + +template +decltype(DecomposePair(std::declval(), std::declval()...)) +DecomposePairImpl(int, F&& f, Args&&... args) { + return DecomposePair(std::forward(f), std::forward(args)...); +} + +template +const char* DecomposePairImpl(char, F&& f, Args&&... args) { + return "not decomposable"; +} + +template +decltype(DecomposePairImpl(0, std::declval(), std::declval()...)) +TryDecomposePair(F&& f, Args&&... args) { + return DecomposePairImpl(0, std::forward(f), std::forward(args)...); +} + +TEST(DecomposePair, Decomposable) { + auto f = [](const int& x, // NOLINT + std::piecewise_construct_t, std::tuple k, + std::tuple&& v) { + EXPECT_EQ(&x, &std::get<0>(k)); + EXPECT_EQ(42, x); + EXPECT_EQ(0.5, std::get<0>(v)); + return 'A'; + }; + EXPECT_EQ('A', TryDecomposePair(f, 42, 0.5)); + EXPECT_EQ('A', TryDecomposePair(f, std::make_pair(42, 0.5))); + EXPECT_EQ('A', TryDecomposePair(f, std::piecewise_construct, + std::make_tuple(42), std::make_tuple(0.5))); +} + +TEST(DecomposePair, NotDecomposable) { + auto f = [](...) 
{ + ADD_FAILURE() << "Must not be called"; + return 'A'; + }; + EXPECT_STREQ("not decomposable", + TryDecomposePair(f)); + EXPECT_STREQ("not decomposable", + TryDecomposePair(f, std::piecewise_construct, std::make_tuple(), + std::make_tuple(0.5))); +} + +TEST(MapSlotPolicy, ConstKeyAndValue) { + using slot_policy = map_slot_policy; + using slot_type = typename slot_policy::slot_type; + + union Slots { + Slots() {} + ~Slots() {} + slot_type slots[100]; + } slots; + + std::allocator< + std::pair> + alloc; + InstanceTracker tracker; + slot_policy::construct(&alloc, &slots.slots[0], CopyableMovableInstance(1), + CopyableMovableInstance(1)); + for (int i = 0; i < 99; ++i) { + slot_policy::transfer(&alloc, &slots.slots[i + 1], &slots.slots[i]); + } + slot_policy::destroy(&alloc, &slots.slots[99]); + + EXPECT_EQ(tracker.copies(), 0); +} + +} // namespace +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/TMessagesProj/jni/voip/webrtc/absl/container/internal/counting_allocator.h b/TMessagesProj/jni/voip/webrtc/absl/container/internal/counting_allocator.h index 9efdc6621..927cf0825 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/container/internal/counting_allocator.h +++ b/TMessagesProj/jni/voip/webrtc/absl/container/internal/counting_allocator.h @@ -15,7 +15,6 @@ #ifndef ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_ #define ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_ -#include #include #include @@ -31,33 +30,63 @@ namespace container_internal { // containers - that chain of allocators uses the same state and is // thus easier to query for aggregate allocation information. template -class CountingAllocator : public std::allocator { +class CountingAllocator { public: - using Alloc = std::allocator; - using pointer = typename Alloc::pointer; - using size_type = typename Alloc::size_type; + using Allocator = std::allocator; + using AllocatorTraits = std::allocator_traits; + using value_type = typename AllocatorTraits::value_type; + using pointer = typename AllocatorTraits::pointer; + using const_pointer = typename AllocatorTraits::const_pointer; + using size_type = typename AllocatorTraits::size_type; + using difference_type = typename AllocatorTraits::difference_type; - CountingAllocator() : bytes_used_(nullptr) {} - explicit CountingAllocator(int64_t* b) : bytes_used_(b) {} + CountingAllocator() = default; + explicit CountingAllocator(int64_t* bytes_used) : bytes_used_(bytes_used) {} + CountingAllocator(int64_t* bytes_used, int64_t* instance_count) + : bytes_used_(bytes_used), instance_count_(instance_count) {} template CountingAllocator(const CountingAllocator& x) - : Alloc(x), bytes_used_(x.bytes_used_) {} + : bytes_used_(x.bytes_used_), instance_count_(x.instance_count_) {} - pointer allocate(size_type n, - std::allocator::const_pointer hint = nullptr) { - assert(bytes_used_ != nullptr); - *bytes_used_ += n * sizeof(T); - return Alloc::allocate(n, hint); + pointer allocate( + size_type n, + typename AllocatorTraits::const_void_pointer hint = nullptr) { + Allocator allocator; + pointer ptr = AllocatorTraits::allocate(allocator, n, hint); + if (bytes_used_ != nullptr) { + *bytes_used_ += n * sizeof(T); + } + return ptr; } void deallocate(pointer p, size_type n) { - Alloc::deallocate(p, n); - assert(bytes_used_ != nullptr); - *bytes_used_ -= n * sizeof(T); + Allocator allocator; + AllocatorTraits::deallocate(allocator, p, n); + if (bytes_used_ != nullptr) { + *bytes_used_ -= n * sizeof(T); + } } - template + template + void construct(U* p, Args&&... 
args) { + Allocator allocator; + AllocatorTraits::construct(allocator, p, std::forward(args)...); + if (instance_count_ != nullptr) { + *instance_count_ += 1; + } + } + + template + void destroy(U* p) { + Allocator allocator; + AllocatorTraits::destroy(allocator, p); + if (instance_count_ != nullptr) { + *instance_count_ -= 1; + } + } + + template class rebind { public: using other = CountingAllocator; @@ -65,7 +94,8 @@ class CountingAllocator : public std::allocator { friend bool operator==(const CountingAllocator& a, const CountingAllocator& b) { - return a.bytes_used_ == b.bytes_used_; + return a.bytes_used_ == b.bytes_used_ && + a.instance_count_ == b.instance_count_; } friend bool operator!=(const CountingAllocator& a, @@ -73,7 +103,8 @@ class CountingAllocator : public std::allocator { return !(a == b); } - int64_t* bytes_used_; + int64_t* bytes_used_ = nullptr; + int64_t* instance_count_ = nullptr; }; } // namespace container_internal diff --git a/TMessagesProj/jni/voip/webrtc/absl/container/internal/hash_function_defaults.h b/TMessagesProj/jni/voip/webrtc/absl/container/internal/hash_function_defaults.h index 0683422ad..250e662c9 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/container/internal/hash_function_defaults.h +++ b/TMessagesProj/jni/voip/webrtc/absl/container/internal/hash_function_defaults.h @@ -78,24 +78,26 @@ struct StringHash { } }; +struct StringEq { + using is_transparent = void; + bool operator()(absl::string_view lhs, absl::string_view rhs) const { + return lhs == rhs; + } + bool operator()(const absl::Cord& lhs, const absl::Cord& rhs) const { + return lhs == rhs; + } + bool operator()(const absl::Cord& lhs, absl::string_view rhs) const { + return lhs == rhs; + } + bool operator()(absl::string_view lhs, const absl::Cord& rhs) const { + return lhs == rhs; + } +}; + // Supports heterogeneous lookup for string-like elements. struct StringHashEq { using Hash = StringHash; - struct Eq { - using is_transparent = void; - bool operator()(absl::string_view lhs, absl::string_view rhs) const { - return lhs == rhs; - } - bool operator()(const absl::Cord& lhs, const absl::Cord& rhs) const { - return lhs == rhs; - } - bool operator()(const absl::Cord& lhs, absl::string_view rhs) const { - return lhs == rhs; - } - bool operator()(absl::string_view lhs, const absl::Cord& rhs) const { - return lhs == rhs; - } - }; + using Eq = StringEq; }; template <> diff --git a/TMessagesProj/jni/voip/webrtc/absl/container/internal/hash_function_defaults_test.cc b/TMessagesProj/jni/voip/webrtc/absl/container/internal/hash_function_defaults_test.cc new file mode 100644 index 000000000..9f0a4c72c --- /dev/null +++ b/TMessagesProj/jni/voip/webrtc/absl/container/internal/hash_function_defaults_test.cc @@ -0,0 +1,383 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
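[Editor's note — illustrative sketch, not part of this patch.] Hoisting the nested Eq out of StringHashEq into a named StringEq does not change behavior: the functor still declares is_transparent, which is what lets the string-keyed Swiss tables hash and compare string_view, Cord, or const char* arguments directly at lookup time. A hedged example of the effect, assuming absl::flat_hash_map keeps using these defaults as it does upstream:

#include <string>

#include "absl/container/flat_hash_map.h"
#include "absl/strings/string_view.h"

int CountOrZero(const absl::flat_hash_map<std::string, int>& counts,
                absl::string_view word) {
  // Heterogeneous lookup: `word` is hashed and compared as a string_view,
  // so no temporary std::string is built just to probe the table.
  auto it = counts.find(word);
  return it == counts.end() ? 0 : it->second;
}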
+ +#include "absl/container/internal/hash_function_defaults.h" + +#include +#include +#include + +#include "gtest/gtest.h" +#include "absl/random/random.h" +#include "absl/strings/cord.h" +#include "absl/strings/cord_test_helpers.h" +#include "absl/strings/string_view.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +namespace { + +using ::testing::Types; + +TEST(Eq, Int32) { + hash_default_eq eq; + EXPECT_TRUE(eq(1, 1u)); + EXPECT_TRUE(eq(1, char{1})); + EXPECT_TRUE(eq(1, true)); + EXPECT_TRUE(eq(1, double{1.1})); + EXPECT_FALSE(eq(1, char{2})); + EXPECT_FALSE(eq(1, 2u)); + EXPECT_FALSE(eq(1, false)); + EXPECT_FALSE(eq(1, 2.)); +} + +TEST(Hash, Int32) { + hash_default_hash hash; + auto h = hash(1); + EXPECT_EQ(h, hash(1u)); + EXPECT_EQ(h, hash(char{1})); + EXPECT_EQ(h, hash(true)); + EXPECT_EQ(h, hash(double{1.1})); + EXPECT_NE(h, hash(2u)); + EXPECT_NE(h, hash(char{2})); + EXPECT_NE(h, hash(false)); + EXPECT_NE(h, hash(2.)); +} + +enum class MyEnum { A, B, C, D }; + +TEST(Eq, Enum) { + hash_default_eq eq; + EXPECT_TRUE(eq(MyEnum::A, MyEnum::A)); + EXPECT_FALSE(eq(MyEnum::A, MyEnum::B)); +} + +TEST(Hash, Enum) { + hash_default_hash hash; + + for (MyEnum e : {MyEnum::A, MyEnum::B, MyEnum::C}) { + auto h = hash(e); + EXPECT_EQ(h, hash_default_hash{}(static_cast(e))); + EXPECT_NE(h, hash(MyEnum::D)); + } +} + +using StringTypes = ::testing::Types; + +template +struct EqString : ::testing::Test { + hash_default_eq key_eq; +}; + +TYPED_TEST_SUITE(EqString, StringTypes); + +template +struct HashString : ::testing::Test { + hash_default_hash hasher; +}; + +TYPED_TEST_SUITE(HashString, StringTypes); + +TYPED_TEST(EqString, Works) { + auto eq = this->key_eq; + EXPECT_TRUE(eq("a", "a")); + EXPECT_TRUE(eq("a", absl::string_view("a"))); + EXPECT_TRUE(eq("a", std::string("a"))); + EXPECT_FALSE(eq("a", "b")); + EXPECT_FALSE(eq("a", absl::string_view("b"))); + EXPECT_FALSE(eq("a", std::string("b"))); +} + +TYPED_TEST(HashString, Works) { + auto hash = this->hasher; + auto h = hash("a"); + EXPECT_EQ(h, hash(absl::string_view("a"))); + EXPECT_EQ(h, hash(std::string("a"))); + EXPECT_NE(h, hash(absl::string_view("b"))); + EXPECT_NE(h, hash(std::string("b"))); +} + +struct NoDeleter { + template + void operator()(const T* ptr) const {} +}; + +using PointerTypes = + ::testing::Types, + std::unique_ptr, + std::unique_ptr, std::unique_ptr, + std::shared_ptr, std::shared_ptr>; + +template +struct EqPointer : ::testing::Test { + hash_default_eq key_eq; +}; + +TYPED_TEST_SUITE(EqPointer, PointerTypes); + +template +struct HashPointer : ::testing::Test { + hash_default_hash hasher; +}; + +TYPED_TEST_SUITE(HashPointer, PointerTypes); + +TYPED_TEST(EqPointer, Works) { + int dummy; + auto eq = this->key_eq; + auto sptr = std::make_shared(); + std::shared_ptr csptr = sptr; + int* ptr = sptr.get(); + const int* cptr = ptr; + std::unique_ptr uptr(ptr); + std::unique_ptr cuptr(ptr); + + EXPECT_TRUE(eq(ptr, cptr)); + EXPECT_TRUE(eq(ptr, sptr)); + EXPECT_TRUE(eq(ptr, uptr)); + EXPECT_TRUE(eq(ptr, csptr)); + EXPECT_TRUE(eq(ptr, cuptr)); + EXPECT_FALSE(eq(&dummy, cptr)); + EXPECT_FALSE(eq(&dummy, sptr)); + EXPECT_FALSE(eq(&dummy, uptr)); + EXPECT_FALSE(eq(&dummy, csptr)); + EXPECT_FALSE(eq(&dummy, cuptr)); +} + +TEST(Hash, DerivedAndBase) { + struct Base {}; + struct Derived : Base {}; + + hash_default_hash hasher; + + Base base; + Derived derived; + EXPECT_NE(hasher(&base), hasher(&derived)); + EXPECT_EQ(hasher(static_cast(&derived)), hasher(&derived)); + + auto dp = std::make_shared(); + 
EXPECT_EQ(hasher(static_cast(dp.get())), hasher(dp)); +} + +TEST(Hash, FunctionPointer) { + using Func = int (*)(); + hash_default_hash hasher; + hash_default_eq eq; + + Func p1 = [] { return 1; }, p2 = [] { return 2; }; + EXPECT_EQ(hasher(p1), hasher(p1)); + EXPECT_TRUE(eq(p1, p1)); + + EXPECT_NE(hasher(p1), hasher(p2)); + EXPECT_FALSE(eq(p1, p2)); +} + +TYPED_TEST(HashPointer, Works) { + int dummy; + auto hash = this->hasher; + auto sptr = std::make_shared(); + std::shared_ptr csptr = sptr; + int* ptr = sptr.get(); + const int* cptr = ptr; + std::unique_ptr uptr(ptr); + std::unique_ptr cuptr(ptr); + + EXPECT_EQ(hash(ptr), hash(cptr)); + EXPECT_EQ(hash(ptr), hash(sptr)); + EXPECT_EQ(hash(ptr), hash(uptr)); + EXPECT_EQ(hash(ptr), hash(csptr)); + EXPECT_EQ(hash(ptr), hash(cuptr)); + EXPECT_NE(hash(&dummy), hash(cptr)); + EXPECT_NE(hash(&dummy), hash(sptr)); + EXPECT_NE(hash(&dummy), hash(uptr)); + EXPECT_NE(hash(&dummy), hash(csptr)); + EXPECT_NE(hash(&dummy), hash(cuptr)); +} + +TEST(EqCord, Works) { + hash_default_eq eq; + const absl::string_view a_string_view = "a"; + const absl::Cord a_cord(a_string_view); + const absl::string_view b_string_view = "b"; + const absl::Cord b_cord(b_string_view); + + EXPECT_TRUE(eq(a_cord, a_cord)); + EXPECT_TRUE(eq(a_cord, a_string_view)); + EXPECT_TRUE(eq(a_string_view, a_cord)); + EXPECT_FALSE(eq(a_cord, b_cord)); + EXPECT_FALSE(eq(a_cord, b_string_view)); + EXPECT_FALSE(eq(b_string_view, a_cord)); +} + +TEST(HashCord, Works) { + hash_default_hash hash; + const absl::string_view a_string_view = "a"; + const absl::Cord a_cord(a_string_view); + const absl::string_view b_string_view = "b"; + const absl::Cord b_cord(b_string_view); + + EXPECT_EQ(hash(a_cord), hash(a_cord)); + EXPECT_EQ(hash(b_cord), hash(b_cord)); + EXPECT_EQ(hash(a_string_view), hash(a_cord)); + EXPECT_EQ(hash(b_string_view), hash(b_cord)); + EXPECT_EQ(hash(absl::Cord("")), hash("")); + EXPECT_EQ(hash(absl::Cord()), hash(absl::string_view())); + + EXPECT_NE(hash(a_cord), hash(b_cord)); + EXPECT_NE(hash(a_cord), hash(b_string_view)); + EXPECT_NE(hash(a_string_view), hash(b_cord)); + EXPECT_NE(hash(a_string_view), hash(b_string_view)); +} + +void NoOpReleaser(absl::string_view data, void* arg) {} + +TEST(HashCord, FragmentedCordWorks) { + hash_default_hash hash; + absl::Cord c = absl::MakeFragmentedCord({"a", "b", "c"}); + EXPECT_FALSE(c.TryFlat().has_value()); + EXPECT_EQ(hash(c), hash("abc")); +} + +TEST(HashCord, FragmentedLongCordWorks) { + hash_default_hash hash; + // Crete some large strings which do not fit on the stack. + std::string a(65536, 'a'); + std::string b(65536, 'b'); + absl::Cord c = absl::MakeFragmentedCord({a, b}); + EXPECT_FALSE(c.TryFlat().has_value()); + EXPECT_EQ(hash(c), hash(a + b)); +} + +TEST(HashCord, RandomCord) { + hash_default_hash hash; + auto bitgen = absl::BitGen(); + for (int i = 0; i < 1000; ++i) { + const int number_of_segments = absl::Uniform(bitgen, 0, 10); + std::vector pieces; + for (size_t s = 0; s < number_of_segments; ++s) { + std::string str; + str.resize(absl::Uniform(bitgen, 0, 4096)); + // MSVC needed the explicit return type in the lambda. + std::generate(str.begin(), str.end(), [&]() -> char { + return static_cast(absl::Uniform(bitgen)); + }); + pieces.push_back(str); + } + absl::Cord c = absl::MakeFragmentedCord(pieces); + EXPECT_EQ(hash(c), hash(std::string(c))); + } +} + +// Cartesian product of (std::string, absl::string_view) +// with (std::string, absl::string_view, const char*, absl::Cord). 
+using StringTypesCartesianProduct = Types< + // clang-format off + std::pair, + std::pair, + std::pair, + std::pair, + + std::pair, + std::pair, + + std::pair, + std::pair, + std::pair>; +// clang-format on + +constexpr char kFirstString[] = "abc123"; +constexpr char kSecondString[] = "ijk456"; + +template +struct StringLikeTest : public ::testing::Test { + typename T::first_type a1{kFirstString}; + typename T::second_type b1{kFirstString}; + typename T::first_type a2{kSecondString}; + typename T::second_type b2{kSecondString}; + hash_default_eq eq; + hash_default_hash hash; +}; + +TYPED_TEST_SUITE_P(StringLikeTest); + +TYPED_TEST_P(StringLikeTest, Eq) { + EXPECT_TRUE(this->eq(this->a1, this->b1)); + EXPECT_TRUE(this->eq(this->b1, this->a1)); +} + +TYPED_TEST_P(StringLikeTest, NotEq) { + EXPECT_FALSE(this->eq(this->a1, this->b2)); + EXPECT_FALSE(this->eq(this->b2, this->a1)); +} + +TYPED_TEST_P(StringLikeTest, HashEq) { + EXPECT_EQ(this->hash(this->a1), this->hash(this->b1)); + EXPECT_EQ(this->hash(this->a2), this->hash(this->b2)); + // It would be a poor hash function which collides on these strings. + EXPECT_NE(this->hash(this->a1), this->hash(this->b2)); +} + +TYPED_TEST_SUITE(StringLikeTest, StringTypesCartesianProduct); + +} // namespace +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +enum Hash : size_t { + kStd = 0x1, // std::hash +#ifdef _MSC_VER + kExtension = kStd, // In MSVC, std::hash == ::hash +#else // _MSC_VER + kExtension = 0x2, // ::hash (GCC extension) +#endif // _MSC_VER +}; + +// H is a bitmask of Hash enumerations. +// Hashable is hashable via all means specified in H. +template +struct Hashable { + static constexpr bool HashableBy(Hash h) { return h & H; } +}; + +namespace std { +template +struct hash> { + template , + class = typename std::enable_if::type> + size_t operator()(E) const { + return kStd; + } +}; +} // namespace std + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +namespace { + +template +size_t Hash(const T& v) { + return hash_default_hash()(v); +} + +TEST(Delegate, HashDispatch) { + EXPECT_EQ(Hash(kStd), Hash(Hashable())); +} + +} // namespace +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/TMessagesProj/jni/voip/webrtc/absl/container/internal/hash_generator_testing.cc b/TMessagesProj/jni/voip/webrtc/absl/container/internal/hash_generator_testing.cc index 75c4db6c3..59cc5aac7 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/container/internal/hash_generator_testing.cc +++ b/TMessagesProj/jni/voip/webrtc/absl/container/internal/hash_generator_testing.cc @@ -41,8 +41,10 @@ class RandomDeviceSeedSeq { } // namespace std::mt19937_64* GetSharedRng() { - RandomDeviceSeedSeq seed_seq; - static auto* rng = new std::mt19937_64(seed_seq); + static auto* rng = [] { + RandomDeviceSeedSeq seed_seq; + return new std::mt19937_64(seed_seq); + }(); return rng; } diff --git a/TMessagesProj/jni/voip/webrtc/absl/container/internal/hash_generator_testing.h b/TMessagesProj/jni/voip/webrtc/absl/container/internal/hash_generator_testing.h index 6869fe45e..f1f555a5c 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/container/internal/hash_generator_testing.h +++ b/TMessagesProj/jni/voip/webrtc/absl/container/internal/hash_generator_testing.h @@ -21,11 +21,13 @@ #include #include +#include #include #include #include #include #include +#include #include "absl/container/internal/hash_policy_testing.h" #include "absl/memory/memory.h" @@ -153,6 +155,25 @@ using GeneratedType = decltype( 
typename Container::value_type, typename Container::key_type>::type>&>()()); +// Naive wrapper that performs a linear search of previous values. +// Beware this is O(SQR), which is reasonable for smaller kMaxValues. +template +struct UniqueGenerator { + Generator gen; + std::vector values; + + T operator()() { + assert(values.size() < kMaxValues); + for (;;) { + T value = gen(); + if (std::find(values.begin(), values.end(), value) == values.end()) { + values.push_back(value); + return value; + } + } + } +}; + } // namespace hash_internal } // namespace container_internal ABSL_NAMESPACE_END diff --git a/TMessagesProj/jni/voip/webrtc/absl/container/internal/hash_policy_traits.h b/TMessagesProj/jni/voip/webrtc/absl/container/internal/hash_policy_traits.h index 3e1209c6e..46c97b18a 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/container/internal/hash_policy_traits.h +++ b/TMessagesProj/jni/voip/webrtc/absl/container/internal/hash_policy_traits.h @@ -17,6 +17,7 @@ #include #include +#include #include #include @@ -29,15 +30,34 @@ namespace container_internal { // Defines how slots are initialized/destroyed/moved. template struct hash_policy_traits { + // The type of the keys stored in the hashtable. + using key_type = typename Policy::key_type; + private: struct ReturnKey { - // We return `Key` here. + // When C++17 is available, we can use std::launder to provide mutable + // access to the key for use in node handle. +#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606 + template ::value, int> = 0> + static key_type& Impl(Key&& k, int) { + return *std::launder( + const_cast(std::addressof(std::forward(k)))); + } +#endif + + template + static Key Impl(Key&& k, char) { + return std::forward(k); + } + // When Key=T&, we forward the lvalue reference. // When Key=T, we return by value to avoid a dangling reference. // eg, for string_hash_map. template - Key operator()(Key&& k, const Args&...) const { - return std::forward(k); + auto operator()(Key&& k, const Args&...) const + -> decltype(Impl(std::forward(k), 0)) { + return Impl(std::forward(k), 0); } }; @@ -52,9 +72,6 @@ struct hash_policy_traits { // The actual object stored in the hash table. using slot_type = typename Policy::slot_type; - // The type of the keys stored in the hashtable. - using key_type = typename Policy::key_type; - // The argument type for insertions into the hashtable. This is different // from value_type for increased performance. See initializer_list constructor // and insert() member functions for more details. @@ -156,7 +173,7 @@ struct hash_policy_traits { // Returns the "key" portion of the slot. // Used for node handle manipulation. 
template - static auto key(slot_type* slot) + static auto mutable_key(slot_type* slot) -> decltype(P::apply(ReturnKey(), element(slot))) { return P::apply(ReturnKey(), element(slot)); } diff --git a/TMessagesProj/jni/voip/webrtc/absl/container/internal/hashtablez_sampler.cc b/TMessagesProj/jni/voip/webrtc/absl/container/internal/hashtablez_sampler.cc index 886524f18..322e0547e 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/container/internal/hashtablez_sampler.cc +++ b/TMessagesProj/jni/voip/webrtc/absl/container/internal/hashtablez_sampler.cc @@ -21,11 +21,13 @@ #include #include "absl/base/attributes.h" -#include "absl/base/internal/exponential_biased.h" #include "absl/container/internal/have_sse.h" #include "absl/debugging/stacktrace.h" #include "absl/memory/memory.h" +#include "absl/profiling/internal/exponential_biased.h" +#include "absl/profiling/internal/sample_recorder.h" #include "absl/synchronization/mutex.h" +#include "absl/utility/utility.h" namespace absl { ABSL_NAMESPACE_BEGIN @@ -37,134 +39,53 @@ ABSL_CONST_INIT std::atomic g_hashtablez_enabled{ false }; ABSL_CONST_INIT std::atomic g_hashtablez_sample_parameter{1 << 10}; -ABSL_CONST_INIT std::atomic g_hashtablez_max_samples{1 << 20}; +std::atomic g_hashtablez_config_listener{nullptr}; #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) -ABSL_PER_THREAD_TLS_KEYWORD absl::base_internal::ExponentialBiased +ABSL_PER_THREAD_TLS_KEYWORD absl::profiling_internal::ExponentialBiased g_exponential_biased_generator; #endif +void TriggerHashtablezConfigListener() { + auto* listener = g_hashtablez_config_listener.load(std::memory_order_acquire); + if (listener != nullptr) listener(); +} + } // namespace #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) -ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample = 0; +ABSL_PER_THREAD_TLS_KEYWORD SamplingState global_next_sample = {0, 0}; #endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) -HashtablezSampler& HashtablezSampler::Global() { +HashtablezSampler& GlobalHashtablezSampler() { static auto* sampler = new HashtablezSampler(); return *sampler; } -HashtablezSampler::DisposeCallback HashtablezSampler::SetDisposeCallback( - DisposeCallback f) { - return dispose_.exchange(f, std::memory_order_relaxed); -} - -HashtablezInfo::HashtablezInfo() { PrepareForSampling(); } +HashtablezInfo::HashtablezInfo() = default; HashtablezInfo::~HashtablezInfo() = default; -void HashtablezInfo::PrepareForSampling() { +void HashtablezInfo::PrepareForSampling(int64_t stride, + size_t inline_element_size_value) { capacity.store(0, std::memory_order_relaxed); size.store(0, std::memory_order_relaxed); num_erases.store(0, std::memory_order_relaxed); + num_rehashes.store(0, std::memory_order_relaxed); max_probe_length.store(0, std::memory_order_relaxed); total_probe_length.store(0, std::memory_order_relaxed); hashes_bitwise_or.store(0, std::memory_order_relaxed); hashes_bitwise_and.store(~size_t{}, std::memory_order_relaxed); + hashes_bitwise_xor.store(0, std::memory_order_relaxed); + max_reserve.store(0, std::memory_order_relaxed); create_time = absl::Now(); + weight = stride; // The inliner makes hardcoded skip_count difficult (especially when combined // with LTO). We use the ability to exclude stacks by regex when encoding // instead. 
depth = absl::GetStackTrace(stack, HashtablezInfo::kMaxStackDepth, /* skip_count= */ 0); - dead = nullptr; -} - -HashtablezSampler::HashtablezSampler() - : dropped_samples_(0), size_estimate_(0), all_(nullptr), dispose_(nullptr) { - absl::MutexLock l(&graveyard_.init_mu); - graveyard_.dead = &graveyard_; -} - -HashtablezSampler::~HashtablezSampler() { - HashtablezInfo* s = all_.load(std::memory_order_acquire); - while (s != nullptr) { - HashtablezInfo* next = s->next; - delete s; - s = next; - } -} - -void HashtablezSampler::PushNew(HashtablezInfo* sample) { - sample->next = all_.load(std::memory_order_relaxed); - while (!all_.compare_exchange_weak(sample->next, sample, - std::memory_order_release, - std::memory_order_relaxed)) { - } -} - -void HashtablezSampler::PushDead(HashtablezInfo* sample) { - if (auto* dispose = dispose_.load(std::memory_order_relaxed)) { - dispose(*sample); - } - - absl::MutexLock graveyard_lock(&graveyard_.init_mu); - absl::MutexLock sample_lock(&sample->init_mu); - sample->dead = graveyard_.dead; - graveyard_.dead = sample; -} - -HashtablezInfo* HashtablezSampler::PopDead() { - absl::MutexLock graveyard_lock(&graveyard_.init_mu); - - // The list is circular, so eventually it collapses down to - // graveyard_.dead == &graveyard_ - // when it is empty. - HashtablezInfo* sample = graveyard_.dead; - if (sample == &graveyard_) return nullptr; - - absl::MutexLock sample_lock(&sample->init_mu); - graveyard_.dead = sample->dead; - sample->PrepareForSampling(); - return sample; -} - -HashtablezInfo* HashtablezSampler::Register() { - int64_t size = size_estimate_.fetch_add(1, std::memory_order_relaxed); - if (size > g_hashtablez_max_samples.load(std::memory_order_relaxed)) { - size_estimate_.fetch_sub(1, std::memory_order_relaxed); - dropped_samples_.fetch_add(1, std::memory_order_relaxed); - return nullptr; - } - - HashtablezInfo* sample = PopDead(); - if (sample == nullptr) { - // Resurrection failed. Hire a new warlock. - sample = new HashtablezInfo(); - PushNew(sample); - } - - return sample; -} - -void HashtablezSampler::Unregister(HashtablezInfo* sample) { - PushDead(sample); - size_estimate_.fetch_sub(1, std::memory_order_relaxed); -} - -int64_t HashtablezSampler::Iterate( - const std::function& f) { - HashtablezInfo* s = all_.load(std::memory_order_acquire); - while (s != nullptr) { - absl::MutexLock l(&s->init_mu); - if (s->dead == nullptr) { - f(*s); - } - s = s->next; - } - - return dropped_samples_.load(std::memory_order_relaxed); + inline_element_size = inline_element_size_value; } static bool ShouldForceSampling() { @@ -179,27 +100,40 @@ static bool ShouldForceSampling() { if (ABSL_PREDICT_TRUE(state == kDontForce)) return false; if (state == kUninitialized) { - state = AbslContainerInternalSampleEverything() ? kForce : kDontForce; + state = ABSL_INTERNAL_C_SYMBOL(AbslContainerInternalSampleEverything)() + ? 
kForce + : kDontForce; global_state.store(state, std::memory_order_relaxed); } return state == kForce; } -HashtablezInfo* SampleSlow(int64_t* next_sample) { +HashtablezInfo* SampleSlow(SamplingState& next_sample, + size_t inline_element_size) { if (ABSL_PREDICT_FALSE(ShouldForceSampling())) { - *next_sample = 1; - return HashtablezSampler::Global().Register(); + next_sample.next_sample = 1; + const int64_t old_stride = exchange(next_sample.sample_stride, 1); + HashtablezInfo* result = + GlobalHashtablezSampler().Register(old_stride, inline_element_size); + return result; } #if !defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) - *next_sample = std::numeric_limits::max(); + next_sample = { + std::numeric_limits::max(), + std::numeric_limits::max(), + }; return nullptr; #else - bool first = *next_sample < 0; - *next_sample = g_exponential_biased_generator.GetStride( + bool first = next_sample.next_sample < 0; + + const int64_t next_stride = g_exponential_biased_generator.GetStride( g_hashtablez_sample_parameter.load(std::memory_order_relaxed)); + + next_sample.next_sample = next_stride; + const int64_t old_stride = exchange(next_sample.sample_stride, next_stride); // Small values of interval are equivalent to just sampling next time. - ABSL_ASSERT(*next_sample >= 1); + ABSL_ASSERT(next_stride >= 1); // g_hashtablez_enabled can be dynamically flipped, we need to set a threshold // low enough that we will start sampling in a reasonable time, so we just use @@ -209,16 +143,16 @@ HashtablezInfo* SampleSlow(int64_t* next_sample) { // We will only be negative on our first count, so we should just retry in // that case. if (first) { - if (ABSL_PREDICT_TRUE(--*next_sample > 0)) return nullptr; - return SampleSlow(next_sample); + if (ABSL_PREDICT_TRUE(--next_sample.next_sample > 0)) return nullptr; + return SampleSlow(next_sample, inline_element_size); } - return HashtablezSampler::Global().Register(); + return GlobalHashtablezSampler().Register(old_stride, inline_element_size); #endif } void UnsampleSlow(HashtablezInfo* info) { - HashtablezSampler::Global().Unregister(info); + GlobalHashtablezSampler().Unregister(info); } void RecordInsertSlow(HashtablezInfo* info, size_t hash, @@ -234,6 +168,7 @@ void RecordInsertSlow(HashtablezInfo* info, size_t hash, info->hashes_bitwise_and.fetch_and(hash, std::memory_order_relaxed); info->hashes_bitwise_or.fetch_or(hash, std::memory_order_relaxed); + info->hashes_bitwise_xor.fetch_xor(hash, std::memory_order_relaxed); info->max_probe_length.store( std::max(info->max_probe_length.load(std::memory_order_relaxed), probe_length), @@ -242,11 +177,33 @@ void RecordInsertSlow(HashtablezInfo* info, size_t hash, info->size.fetch_add(1, std::memory_order_relaxed); } +void SetHashtablezConfigListener(HashtablezConfigListener l) { + g_hashtablez_config_listener.store(l, std::memory_order_release); +} + +bool IsHashtablezEnabled() { + return g_hashtablez_enabled.load(std::memory_order_acquire); +} + void SetHashtablezEnabled(bool enabled) { + SetHashtablezEnabledInternal(enabled); + TriggerHashtablezConfigListener(); +} + +void SetHashtablezEnabledInternal(bool enabled) { g_hashtablez_enabled.store(enabled, std::memory_order_release); } +int32_t GetHashtablezSampleParameter() { + return g_hashtablez_sample_parameter.load(std::memory_order_acquire); +} + void SetHashtablezSampleParameter(int32_t rate) { + SetHashtablezSampleParameterInternal(rate); + TriggerHashtablezConfigListener(); +} + +void SetHashtablezSampleParameterInternal(int32_t rate) { if (rate > 0) { 
g_hashtablez_sample_parameter.store(rate, std::memory_order_release); } else { @@ -255,9 +212,18 @@ void SetHashtablezSampleParameter(int32_t rate) { } } +int32_t GetHashtablezMaxSamples() { + return GlobalHashtablezSampler().GetMaxSamples(); +} + void SetHashtablezMaxSamples(int32_t max) { + SetHashtablezMaxSamplesInternal(max); + TriggerHashtablezConfigListener(); +} + +void SetHashtablezMaxSamplesInternal(int32_t max) { if (max > 0) { - g_hashtablez_max_samples.store(max, std::memory_order_release); + GlobalHashtablezSampler().SetMaxSamples(max); } else { ABSL_RAW_LOG(ERROR, "Invalid hashtablez max samples: %lld", static_cast(max)); // NOLINT(runtime/int) diff --git a/TMessagesProj/jni/voip/webrtc/absl/container/internal/hashtablez_sampler.h b/TMessagesProj/jni/voip/webrtc/absl/container/internal/hashtablez_sampler.h index 8aaffc35a..e7c204eea 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/container/internal/hashtablez_sampler.h +++ b/TMessagesProj/jni/voip/webrtc/absl/container/internal/hashtablez_sampler.h @@ -47,6 +47,7 @@ #include "absl/base/internal/per_thread_tls.h" #include "absl/base/optimization.h" #include "absl/container/internal/have_sse.h" +#include "absl/profiling/internal/sample_recorder.h" #include "absl/synchronization/mutex.h" #include "absl/utility/utility.h" @@ -57,7 +58,7 @@ namespace container_internal { // Stores information about a sampled hashtable. All mutations to this *must* // be made through `Record*` functions below. All reads from this *must* only // occur in the callback to `HashtablezSampler::Iterate`. -struct HashtablezInfo { +struct HashtablezInfo : public profiling_internal::Sample { // Constructs the object but does not fill in any fields. HashtablezInfo(); ~HashtablezInfo(); @@ -66,35 +67,32 @@ struct HashtablezInfo { // Puts the object into a clean state, fills in the logically `const` members, // blocking for any readers that are currently sampling the object. - void PrepareForSampling() ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu); + void PrepareForSampling(int64_t stride, size_t inline_element_size_value) + ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu); // These fields are mutated by the various Record* APIs and need to be // thread-safe. std::atomic capacity; std::atomic size; std::atomic num_erases; + std::atomic num_rehashes; std::atomic max_probe_length; std::atomic total_probe_length; std::atomic hashes_bitwise_or; std::atomic hashes_bitwise_and; - - // `HashtablezSampler` maintains intrusive linked lists for all samples. See - // comments on `HashtablezSampler::all_` for details on these. `init_mu` - // guards the ability to restore the sample to a pristine state. This - // prevents races with sampling and resurrecting an object. - absl::Mutex init_mu; - HashtablezInfo* next; - HashtablezInfo* dead ABSL_GUARDED_BY(init_mu); + std::atomic hashes_bitwise_xor; + std::atomic max_reserve; // All of the fields below are set by `PrepareForSampling`, they must not be // mutated in `Record*` functions. They are logically `const` in that sense. - // These are guarded by init_mu, but that is not externalized to clients, who - // can only read them during `HashtablezSampler::Iterate` which will hold the - // lock. + // These are guarded by init_mu, but that is not externalized to clients, + // which can read them only during `SampleRecorder::Iterate` which will hold + // the lock. static constexpr int kMaxStackDepth = 64; absl::Time create_time; int32_t depth; void* stack[kMaxStackDepth]; + size_t inline_element_size; // How big is the slot? 
}; inline void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) { @@ -105,6 +103,23 @@ inline void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) { #endif info->total_probe_length.store(total_probe_length, std::memory_order_relaxed); info->num_erases.store(0, std::memory_order_relaxed); + // There is only one concurrent writer, so `load` then `store` is sufficient + // instead of using `fetch_add`. + info->num_rehashes.store( + 1 + info->num_rehashes.load(std::memory_order_relaxed), + std::memory_order_relaxed); +} + +inline void RecordReservationSlow(HashtablezInfo* info, + size_t target_capacity) { + info->max_reserve.store( + (std::max)(info->max_reserve.load(std::memory_order_relaxed), + target_capacity), + std::memory_order_relaxed); +} + +inline void RecordClearedReservationSlow(HashtablezInfo* info) { + info->max_reserve.store(0, std::memory_order_relaxed); } inline void RecordStorageChangedSlow(HashtablezInfo* info, size_t size, @@ -113,7 +128,8 @@ inline void RecordStorageChangedSlow(HashtablezInfo* info, size_t size, info->capacity.store(capacity, std::memory_order_relaxed); if (size == 0) { // This is a clear, reset the total/num_erases too. - RecordRehashSlow(info, 0); + info->total_probe_length.store(0, std::memory_order_relaxed); + info->num_erases.store(0, std::memory_order_relaxed); } } @@ -122,12 +138,29 @@ void RecordInsertSlow(HashtablezInfo* info, size_t hash, inline void RecordEraseSlow(HashtablezInfo* info) { info->size.fetch_sub(1, std::memory_order_relaxed); - info->num_erases.fetch_add(1, std::memory_order_relaxed); + // There is only one concurrent writer, so `load` then `store` is sufficient + // instead of using `fetch_add`. + info->num_erases.store( + 1 + info->num_erases.load(std::memory_order_relaxed), + std::memory_order_relaxed); } -HashtablezInfo* SampleSlow(int64_t* next_sample); +struct SamplingState { + int64_t next_sample; + // When we make a sampling decision, we record that distance so we can weight + // each sample. + int64_t sample_stride; +}; + +HashtablezInfo* SampleSlow(SamplingState& next_sample, + size_t inline_element_size); void UnsampleSlow(HashtablezInfo* info); +#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) +#error ABSL_INTERNAL_HASHTABLEZ_SAMPLE cannot be directly set +#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) + +#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) class HashtablezInfoHandle { public: explicit HashtablezInfoHandle() : info_(nullptr) {} @@ -160,6 +193,16 @@ class HashtablezInfoHandle { RecordRehashSlow(info_, total_probe_length); } + inline void RecordReservation(size_t target_capacity) { + if (ABSL_PREDICT_TRUE(info_ == nullptr)) return; + RecordReservationSlow(info_, target_capacity); + } + + inline void RecordClearedReservation() { + if (ABSL_PREDICT_TRUE(info_ == nullptr)) return; + RecordClearedReservationSlow(info_); + } + inline void RecordInsert(size_t hash, size_t distance_from_desired) { if (ABSL_PREDICT_TRUE(info_ == nullptr)) return; RecordInsertSlow(info_, hash, distance_from_desired); @@ -179,116 +222,75 @@ class HashtablezInfoHandle { friend class HashtablezInfoHandlePeer; HashtablezInfo* info_; }; +#else +// Ensure that when Hashtablez is turned off at compile time, HashtablezInfo can +// be removed by the linker, in order to reduce the binary size. 
+class HashtablezInfoHandle { + public: + explicit HashtablezInfoHandle() = default; + explicit HashtablezInfoHandle(std::nullptr_t) {} -#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) -#error ABSL_INTERNAL_HASHTABLEZ_SAMPLE cannot be directly set + inline void RecordStorageChanged(size_t /*size*/, size_t /*capacity*/) {} + inline void RecordRehash(size_t /*total_probe_length*/) {} + inline void RecordReservation(size_t /*target_capacity*/) {} + inline void RecordClearedReservation() {} + inline void RecordInsert(size_t /*hash*/, size_t /*distance_from_desired*/) {} + inline void RecordErase() {} + + friend inline void swap(HashtablezInfoHandle& /*lhs*/, + HashtablezInfoHandle& /*rhs*/) {} +}; #endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) -#if (ABSL_PER_THREAD_TLS == 1) && !defined(ABSL_BUILD_DLL) && \ - !defined(ABSL_CONSUME_DLL) -#define ABSL_INTERNAL_HASHTABLEZ_SAMPLE -#endif - #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) -extern ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample; -#endif // ABSL_PER_THREAD_TLS +extern ABSL_PER_THREAD_TLS_KEYWORD SamplingState global_next_sample; +#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) // Returns an RAII sampling handle that manages registration and unregistation // with the global sampler. -inline HashtablezInfoHandle Sample() { +inline HashtablezInfoHandle Sample( + size_t inline_element_size ABSL_ATTRIBUTE_UNUSED) { #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) - if (ABSL_PREDICT_TRUE(--global_next_sample > 0)) { + if (ABSL_PREDICT_TRUE(--global_next_sample.next_sample > 0)) { return HashtablezInfoHandle(nullptr); } - return HashtablezInfoHandle(SampleSlow(&global_next_sample)); + return HashtablezInfoHandle( + SampleSlow(global_next_sample, inline_element_size)); #else return HashtablezInfoHandle(nullptr); #endif // !ABSL_PER_THREAD_TLS } -// Holds samples and their associated stack traces with a soft limit of -// `SetHashtablezMaxSamples()`. -// -// Thread safe. -class HashtablezSampler { - public: - // Returns a global Sampler. - static HashtablezSampler& Global(); +using HashtablezSampler = + ::absl::profiling_internal::SampleRecorder; - HashtablezSampler(); - ~HashtablezSampler(); +// Returns a global Sampler. +HashtablezSampler& GlobalHashtablezSampler(); - // Registers for sampling. Returns an opaque registration info. - HashtablezInfo* Register(); - - // Unregisters the sample. - void Unregister(HashtablezInfo* sample); - - // The dispose callback will be called on all samples the moment they are - // being unregistered. Only affects samples that are unregistered after the - // callback has been set. - // Returns the previous callback. - using DisposeCallback = void (*)(const HashtablezInfo&); - DisposeCallback SetDisposeCallback(DisposeCallback f); - - // Iterates over all the registered `StackInfo`s. Returning the number of - // samples that have been dropped. - int64_t Iterate(const std::function& f); - - private: - void PushNew(HashtablezInfo* sample); - void PushDead(HashtablezInfo* sample); - HashtablezInfo* PopDead(); - - std::atomic dropped_samples_; - std::atomic size_estimate_; - - // Intrusive lock free linked lists for tracking samples. - // - // `all_` records all samples (they are never removed from this list) and is - // terminated with a `nullptr`. - // - // `graveyard_.dead` is a circular linked list. When it is empty, - // `graveyard_.dead == &graveyard`. The list is circular so that - // every item on it (even the last) has a non-null dead pointer. 
This allows - // `Iterate` to determine if a given sample is live or dead using only - // information on the sample itself. - // - // For example, nodes [A, B, C, D, E] with [A, C, E] alive and [B, D] dead - // looks like this (G is the Graveyard): - // - // +---+ +---+ +---+ +---+ +---+ - // all -->| A |--->| B |--->| C |--->| D |--->| E | - // | | | | | | | | | | - // +---+ | | +->| |-+ | | +->| |-+ | | - // | G | +---+ | +---+ | +---+ | +---+ | +---+ - // | | | | | | - // | | --------+ +--------+ | - // +---+ | - // ^ | - // +--------------------------------------+ - // - std::atomic all_; - HashtablezInfo graveyard_; - - std::atomic dispose_; -}; +using HashtablezConfigListener = void (*)(); +void SetHashtablezConfigListener(HashtablezConfigListener l); // Enables or disables sampling for Swiss tables. +bool IsHashtablezEnabled(); void SetHashtablezEnabled(bool enabled); +void SetHashtablezEnabledInternal(bool enabled); // Sets the rate at which Swiss tables will be sampled. +int32_t GetHashtablezSampleParameter(); void SetHashtablezSampleParameter(int32_t rate); +void SetHashtablezSampleParameterInternal(int32_t rate); // Sets a soft max for the number of samples that will be kept. +int32_t GetHashtablezMaxSamples(); void SetHashtablezMaxSamples(int32_t max); +void SetHashtablezMaxSamplesInternal(int32_t max); // Configuration override. // This allows process-wide sampling without depending on order of // initialization of static storage duration objects. // The definition of this constant is weak, which allows us to inject a // different value for it at link time. -extern "C" bool AbslContainerInternalSampleEverything(); +extern "C" bool ABSL_INTERNAL_C_SYMBOL(AbslContainerInternalSampleEverything)(); } // namespace container_internal ABSL_NAMESPACE_END diff --git a/TMessagesProj/jni/voip/webrtc/absl/container/internal/hashtablez_sampler_force_weak_definition.cc b/TMessagesProj/jni/voip/webrtc/absl/container/internal/hashtablez_sampler_force_weak_definition.cc index 78b9d362a..ed35a7eec 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/container/internal/hashtablez_sampler_force_weak_definition.cc +++ b/TMessagesProj/jni/voip/webrtc/absl/container/internal/hashtablez_sampler_force_weak_definition.cc @@ -21,7 +21,8 @@ ABSL_NAMESPACE_BEGIN namespace container_internal { // See hashtablez_sampler.h for details. -extern "C" ABSL_ATTRIBUTE_WEAK bool AbslContainerInternalSampleEverything() { +extern "C" ABSL_ATTRIBUTE_WEAK bool ABSL_INTERNAL_C_SYMBOL( + AbslContainerInternalSampleEverything)() { return false; } diff --git a/TMessagesProj/jni/voip/webrtc/absl/container/internal/hashtablez_sampler_test.cc b/TMessagesProj/jni/voip/webrtc/absl/container/internal/hashtablez_sampler_test.cc new file mode 100644 index 000000000..77cdf2fd9 --- /dev/null +++ b/TMessagesProj/jni/voip/webrtc/absl/container/internal/hashtablez_sampler_test.cc @@ -0,0 +1,428 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
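[Editor's note — illustrative sketch, not part of this patch.] After the rewrite above, sample storage lives in profiling_internal::SampleRecorder and the tuning knobs become plain free functions with matching getters. A rough sketch of how a process might enable sampling and walk the collected HashtablezInfo records; the names used here are the absl internals declared in the hunks above, so treat the exact calls as assumptions rather than a stable API:

#include <atomic>
#include <cstdio>

#include "absl/container/internal/hashtablez_sampler.h"

void DumpSwissTableSamples() {
  using absl::container_internal::GlobalHashtablezSampler;
  using absl::container_internal::HashtablezInfo;

  absl::container_internal::SetHashtablezEnabled(true);
  absl::container_internal::SetHashtablezSampleParameter(1 << 10);  // mean sampling stride
  absl::container_internal::SetHashtablezMaxSamples(1 << 20);       // soft cap on live samples

  // Iterate visits every registered sample under its lock; reads of the
  // atomic counters are safe inside the callback.
  GlobalHashtablezSampler().Iterate([](const HashtablezInfo& info) {
    std::printf("size=%zu capacity=%zu weight=%lld\n",
                info.size.load(std::memory_order_relaxed),
                info.capacity.load(std::memory_order_relaxed),
                static_cast<long long>(info.weight));
  });
}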
+ +#include "absl/container/internal/hashtablez_sampler.h" + +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/base/attributes.h" +#include "absl/container/internal/have_sse.h" +#include "absl/profiling/internal/sample_recorder.h" +#include "absl/synchronization/blocking_counter.h" +#include "absl/synchronization/internal/thread_pool.h" +#include "absl/synchronization/mutex.h" +#include "absl/synchronization/notification.h" +#include "absl/time/clock.h" +#include "absl/time/time.h" + +#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 +constexpr int kProbeLength = 16; +#else +constexpr int kProbeLength = 8; +#endif + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) +class HashtablezInfoHandlePeer { + public: + static bool IsSampled(const HashtablezInfoHandle& h) { + return h.info_ != nullptr; + } + + static HashtablezInfo* GetInfo(HashtablezInfoHandle* h) { return h->info_; } +}; +#else +class HashtablezInfoHandlePeer { + public: + static bool IsSampled(const HashtablezInfoHandle&) { return false; } + static HashtablezInfo* GetInfo(HashtablezInfoHandle*) { return nullptr; } +}; +#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) + +namespace { +using ::absl::synchronization_internal::ThreadPool; +using ::testing::IsEmpty; +using ::testing::UnorderedElementsAre; + +std::vector GetSizes(HashtablezSampler* s) { + std::vector res; + s->Iterate([&](const HashtablezInfo& info) { + res.push_back(info.size.load(std::memory_order_acquire)); + }); + return res; +} + +HashtablezInfo* Register(HashtablezSampler* s, size_t size) { + const int64_t test_stride = 123; + const size_t test_element_size = 17; + auto* info = s->Register(test_stride, test_element_size); + assert(info != nullptr); + info->size.store(size); + return info; +} + +TEST(HashtablezInfoTest, PrepareForSampling) { + absl::Time test_start = absl::Now(); + const int64_t test_stride = 123; + const size_t test_element_size = 17; + HashtablezInfo info; + absl::MutexLock l(&info.init_mu); + info.PrepareForSampling(test_stride, test_element_size); + + EXPECT_EQ(info.capacity.load(), 0); + EXPECT_EQ(info.size.load(), 0); + EXPECT_EQ(info.num_erases.load(), 0); + EXPECT_EQ(info.num_rehashes.load(), 0); + EXPECT_EQ(info.max_probe_length.load(), 0); + EXPECT_EQ(info.total_probe_length.load(), 0); + EXPECT_EQ(info.hashes_bitwise_or.load(), 0); + EXPECT_EQ(info.hashes_bitwise_and.load(), ~size_t{}); + EXPECT_EQ(info.hashes_bitwise_xor.load(), 0); + EXPECT_EQ(info.max_reserve.load(), 0); + EXPECT_GE(info.create_time, test_start); + EXPECT_EQ(info.weight, test_stride); + EXPECT_EQ(info.inline_element_size, test_element_size); + + info.capacity.store(1, std::memory_order_relaxed); + info.size.store(1, std::memory_order_relaxed); + info.num_erases.store(1, std::memory_order_relaxed); + info.max_probe_length.store(1, std::memory_order_relaxed); + info.total_probe_length.store(1, std::memory_order_relaxed); + info.hashes_bitwise_or.store(1, std::memory_order_relaxed); + info.hashes_bitwise_and.store(1, std::memory_order_relaxed); + info.hashes_bitwise_xor.store(1, std::memory_order_relaxed); + info.max_reserve.store(1, std::memory_order_relaxed); + info.create_time = test_start - absl::Hours(20); + + info.PrepareForSampling(test_stride * 2, test_element_size); + EXPECT_EQ(info.capacity.load(), 0); + EXPECT_EQ(info.size.load(), 0); + EXPECT_EQ(info.num_erases.load(), 0); + EXPECT_EQ(info.num_rehashes.load(), 0); + 
EXPECT_EQ(info.max_probe_length.load(), 0); + EXPECT_EQ(info.total_probe_length.load(), 0); + EXPECT_EQ(info.hashes_bitwise_or.load(), 0); + EXPECT_EQ(info.hashes_bitwise_and.load(), ~size_t{}); + EXPECT_EQ(info.hashes_bitwise_xor.load(), 0); + EXPECT_EQ(info.max_reserve.load(), 0); + EXPECT_EQ(info.weight, 2 * test_stride); + EXPECT_EQ(info.inline_element_size, test_element_size); + EXPECT_GE(info.create_time, test_start); +} + +TEST(HashtablezInfoTest, RecordStorageChanged) { + HashtablezInfo info; + absl::MutexLock l(&info.init_mu); + const int64_t test_stride = 21; + const size_t test_element_size = 19; + info.PrepareForSampling(test_stride, test_element_size); + RecordStorageChangedSlow(&info, 17, 47); + EXPECT_EQ(info.size.load(), 17); + EXPECT_EQ(info.capacity.load(), 47); + RecordStorageChangedSlow(&info, 20, 20); + EXPECT_EQ(info.size.load(), 20); + EXPECT_EQ(info.capacity.load(), 20); +} + +TEST(HashtablezInfoTest, RecordInsert) { + HashtablezInfo info; + absl::MutexLock l(&info.init_mu); + const int64_t test_stride = 25; + const size_t test_element_size = 23; + info.PrepareForSampling(test_stride, test_element_size); + EXPECT_EQ(info.max_probe_length.load(), 0); + RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength); + EXPECT_EQ(info.max_probe_length.load(), 6); + EXPECT_EQ(info.hashes_bitwise_and.load(), 0x0000FF00); + EXPECT_EQ(info.hashes_bitwise_or.load(), 0x0000FF00); + EXPECT_EQ(info.hashes_bitwise_xor.load(), 0x0000FF00); + RecordInsertSlow(&info, 0x000FF000, 4 * kProbeLength); + EXPECT_EQ(info.max_probe_length.load(), 6); + EXPECT_EQ(info.hashes_bitwise_and.load(), 0x0000F000); + EXPECT_EQ(info.hashes_bitwise_or.load(), 0x000FFF00); + EXPECT_EQ(info.hashes_bitwise_xor.load(), 0x000F0F00); + RecordInsertSlow(&info, 0x00FF0000, 12 * kProbeLength); + EXPECT_EQ(info.max_probe_length.load(), 12); + EXPECT_EQ(info.hashes_bitwise_and.load(), 0x00000000); + EXPECT_EQ(info.hashes_bitwise_or.load(), 0x00FFFF00); + EXPECT_EQ(info.hashes_bitwise_xor.load(), 0x00F00F00); +} + +TEST(HashtablezInfoTest, RecordErase) { + const int64_t test_stride = 31; + const size_t test_element_size = 29; + HashtablezInfo info; + absl::MutexLock l(&info.init_mu); + info.PrepareForSampling(test_stride, test_element_size); + EXPECT_EQ(info.num_erases.load(), 0); + EXPECT_EQ(info.size.load(), 0); + RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength); + EXPECT_EQ(info.size.load(), 1); + RecordEraseSlow(&info); + EXPECT_EQ(info.size.load(), 0); + EXPECT_EQ(info.num_erases.load(), 1); + EXPECT_EQ(info.inline_element_size, test_element_size); +} + +TEST(HashtablezInfoTest, RecordRehash) { + const int64_t test_stride = 33; + const size_t test_element_size = 31; + HashtablezInfo info; + absl::MutexLock l(&info.init_mu); + info.PrepareForSampling(test_stride, test_element_size); + RecordInsertSlow(&info, 0x1, 0); + RecordInsertSlow(&info, 0x2, kProbeLength); + RecordInsertSlow(&info, 0x4, kProbeLength); + RecordInsertSlow(&info, 0x8, 2 * kProbeLength); + EXPECT_EQ(info.size.load(), 4); + EXPECT_EQ(info.total_probe_length.load(), 4); + + RecordEraseSlow(&info); + RecordEraseSlow(&info); + EXPECT_EQ(info.size.load(), 2); + EXPECT_EQ(info.total_probe_length.load(), 4); + EXPECT_EQ(info.num_erases.load(), 2); + + RecordRehashSlow(&info, 3 * kProbeLength); + EXPECT_EQ(info.size.load(), 2); + EXPECT_EQ(info.total_probe_length.load(), 3); + EXPECT_EQ(info.num_erases.load(), 0); + EXPECT_EQ(info.num_rehashes.load(), 1); + EXPECT_EQ(info.inline_element_size, test_element_size); +} + +TEST(HashtablezInfoTest, 
RecordReservation) { + HashtablezInfo info; + absl::MutexLock l(&info.init_mu); + const int64_t test_stride = 35; + const size_t test_element_size = 33; + info.PrepareForSampling(test_stride, test_element_size); + RecordReservationSlow(&info, 3); + EXPECT_EQ(info.max_reserve.load(), 3); + + RecordReservationSlow(&info, 2); + // High watermark does not change + EXPECT_EQ(info.max_reserve.load(), 3); + + RecordReservationSlow(&info, 10); + // High watermark does change + EXPECT_EQ(info.max_reserve.load(), 10); +} + +#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) +TEST(HashtablezSamplerTest, SmallSampleParameter) { + const size_t test_element_size = 31; + SetHashtablezEnabled(true); + SetHashtablezSampleParameter(100); + + for (int i = 0; i < 1000; ++i) { + SamplingState next_sample = {0, 0}; + HashtablezInfo* sample = SampleSlow(next_sample, test_element_size); + EXPECT_GT(next_sample.next_sample, 0); + EXPECT_EQ(next_sample.next_sample, next_sample.sample_stride); + EXPECT_NE(sample, nullptr); + UnsampleSlow(sample); + } +} + +TEST(HashtablezSamplerTest, LargeSampleParameter) { + const size_t test_element_size = 31; + SetHashtablezEnabled(true); + SetHashtablezSampleParameter(std::numeric_limits::max()); + + for (int i = 0; i < 1000; ++i) { + SamplingState next_sample = {0, 0}; + HashtablezInfo* sample = SampleSlow(next_sample, test_element_size); + EXPECT_GT(next_sample.next_sample, 0); + EXPECT_EQ(next_sample.next_sample, next_sample.sample_stride); + EXPECT_NE(sample, nullptr); + UnsampleSlow(sample); + } +} + +TEST(HashtablezSamplerTest, Sample) { + const size_t test_element_size = 31; + SetHashtablezEnabled(true); + SetHashtablezSampleParameter(100); + int64_t num_sampled = 0; + int64_t total = 0; + double sample_rate = 0.0; + for (int i = 0; i < 1000000; ++i) { + HashtablezInfoHandle h = Sample(test_element_size); + ++total; + if (HashtablezInfoHandlePeer::IsSampled(h)) { + ++num_sampled; + } + sample_rate = static_cast(num_sampled) / total; + if (0.005 < sample_rate && sample_rate < 0.015) break; + } + EXPECT_NEAR(sample_rate, 0.01, 0.005); +} + +TEST(HashtablezSamplerTest, Handle) { + auto& sampler = GlobalHashtablezSampler(); + const int64_t test_stride = 41; + const size_t test_element_size = 39; + HashtablezInfoHandle h(sampler.Register(test_stride, test_element_size)); + auto* info = HashtablezInfoHandlePeer::GetInfo(&h); + info->hashes_bitwise_and.store(0x12345678, std::memory_order_relaxed); + + bool found = false; + sampler.Iterate([&](const HashtablezInfo& h) { + if (&h == info) { + EXPECT_EQ(h.weight, test_stride); + EXPECT_EQ(h.hashes_bitwise_and.load(), 0x12345678); + found = true; + } + }); + EXPECT_TRUE(found); + + h = HashtablezInfoHandle(); + found = false; + sampler.Iterate([&](const HashtablezInfo& h) { + if (&h == info) { + // this will only happen if some other thread has resurrected the info + // the old handle was using. 
+ if (h.hashes_bitwise_and.load() == 0x12345678) { + found = true; + } + } + }); + EXPECT_FALSE(found); +} +#endif + + +TEST(HashtablezSamplerTest, Registration) { + HashtablezSampler sampler; + auto* info1 = Register(&sampler, 1); + EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(1)); + + auto* info2 = Register(&sampler, 2); + EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(1, 2)); + info1->size.store(3); + EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(3, 2)); + + sampler.Unregister(info1); + sampler.Unregister(info2); +} + +TEST(HashtablezSamplerTest, Unregistration) { + HashtablezSampler sampler; + std::vector infos; + for (size_t i = 0; i < 3; ++i) { + infos.push_back(Register(&sampler, i)); + } + EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 1, 2)); + + sampler.Unregister(infos[1]); + EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2)); + + infos.push_back(Register(&sampler, 3)); + infos.push_back(Register(&sampler, 4)); + EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2, 3, 4)); + sampler.Unregister(infos[3]); + EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2, 4)); + + sampler.Unregister(infos[0]); + sampler.Unregister(infos[2]); + sampler.Unregister(infos[4]); + EXPECT_THAT(GetSizes(&sampler), IsEmpty()); +} + +TEST(HashtablezSamplerTest, MultiThreaded) { + HashtablezSampler sampler; + Notification stop; + ThreadPool pool(10); + + for (int i = 0; i < 10; ++i) { + const int64_t sampling_stride = 11 + i % 3; + const size_t elt_size = 10 + i % 2; + pool.Schedule([&sampler, &stop, sampling_stride, elt_size]() { + std::random_device rd; + std::mt19937 gen(rd()); + + std::vector infoz; + while (!stop.HasBeenNotified()) { + if (infoz.empty()) { + infoz.push_back(sampler.Register(sampling_stride, elt_size)); + } + switch (std::uniform_int_distribution<>(0, 2)(gen)) { + case 0: { + infoz.push_back(sampler.Register(sampling_stride, elt_size)); + break; + } + case 1: { + size_t p = + std::uniform_int_distribution<>(0, infoz.size() - 1)(gen); + HashtablezInfo* info = infoz[p]; + infoz[p] = infoz.back(); + infoz.pop_back(); + EXPECT_EQ(info->weight, sampling_stride); + sampler.Unregister(info); + break; + } + case 2: { + absl::Duration oldest = absl::ZeroDuration(); + sampler.Iterate([&](const HashtablezInfo& info) { + oldest = std::max(oldest, absl::Now() - info.create_time); + }); + ASSERT_GE(oldest, absl::ZeroDuration()); + break; + } + } + } + }); + } + // The threads will hammer away. Give it a little bit of time for tsan to + // spot errors. + absl::SleepFor(absl::Seconds(3)); + stop.Notify(); +} + +TEST(HashtablezSamplerTest, Callback) { + HashtablezSampler sampler; + + auto* info1 = Register(&sampler, 1); + auto* info2 = Register(&sampler, 2); + + static const HashtablezInfo* expected; + + auto callback = [](const HashtablezInfo& info) { + // We can't use `info` outside of this callback because the object will be + // disposed as soon as we return from here. + EXPECT_EQ(&info, expected); + }; + + // Set the callback. + EXPECT_EQ(sampler.SetDisposeCallback(callback), nullptr); + expected = info1; + sampler.Unregister(info1); + + // Unset the callback. + EXPECT_EQ(callback, sampler.SetDisposeCallback(nullptr)); + expected = nullptr; // no more calls. 
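The Registration, Unregistration, MultiThreaded and Callback tests above all exercise one small contract: a registry that hands out records with stable addresses, lets any thread walk the live records, and runs a dispose hook when a record is unregistered. A simplified sketch of such a registry under that reading (a coarse mutex stands in for the library's lock-free list; Record and Registry are illustrative names):

#include <cstddef>
#include <functional>
#include <list>
#include <mutex>
#include <utility>

struct Record {
  size_t size = 0;
};

class Registry {
 public:
  using DisposeCallback = std::function<void(const Record&)>;

  Record* Register() {
    std::lock_guard<std::mutex> lock(mu_);
    records_.emplace_back();           // std::list keeps element addresses stable
    return &records_.back();
  }

  void Unregister(Record* r) {
    std::lock_guard<std::mutex> lock(mu_);
    if (dispose_) dispose_(*r);        // fire the dispose hook before dropping it
    records_.remove_if([r](const Record& rec) { return &rec == r; });
  }

  // Visits every live record under the lock, like Sampler::Iterate.
  template <typename F>
  void Iterate(F f) {
    std::lock_guard<std::mutex> lock(mu_);
    for (const Record& r : records_) f(r);
  }

  // Installs a new dispose hook and returns the previous one.
  DisposeCallback SetDisposeCallback(DisposeCallback cb) {
    std::lock_guard<std::mutex> lock(mu_);
    std::swap(cb, dispose_);
    return cb;
  }

 private:
  std::mutex mu_;
  std::list<Record> records_;
  DisposeCallback dispose_;
};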
+ sampler.Unregister(info2); +} + +} // namespace +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/TMessagesProj/jni/voip/webrtc/absl/container/internal/inlined_vector.h b/TMessagesProj/jni/voip/webrtc/absl/container/internal/inlined_vector.h index 4d80b727b..2baf26f3e 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/container/internal/inlined_vector.h +++ b/TMessagesProj/jni/voip/webrtc/absl/container/internal/inlined_vector.h @@ -21,8 +21,11 @@ #include #include #include +#include +#include #include +#include "absl/base/attributes.h" #include "absl/base/macros.h" #include "absl/container/internal/compressed_tuple.h" #include "absl/memory/memory.h" @@ -33,96 +36,149 @@ namespace absl { ABSL_NAMESPACE_BEGIN namespace inlined_vector_internal { +// GCC does not deal very well with the below code +#if !defined(__clang__) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Warray-bounds" +#pragma GCC diagnostic ignored "-Wmaybe-uninitialized" +#endif + +template +using AllocatorTraits = std::allocator_traits; +template +using ValueType = typename AllocatorTraits::value_type; +template +using SizeType = typename AllocatorTraits::size_type; +template +using Pointer = typename AllocatorTraits::pointer; +template +using ConstPointer = typename AllocatorTraits::const_pointer; +template +using SizeType = typename AllocatorTraits::size_type; +template +using DifferenceType = typename AllocatorTraits::difference_type; +template +using Reference = ValueType&; +template +using ConstReference = const ValueType&; +template +using Iterator = Pointer; +template +using ConstIterator = ConstPointer; +template +using ReverseIterator = typename std::reverse_iterator>; +template +using ConstReverseIterator = typename std::reverse_iterator>; +template +using MoveIterator = typename std::move_iterator>; + template using IsAtLeastForwardIterator = std::is_convertible< typename std::iterator_traits::iterator_category, std::forward_iterator_tag>; -template ::value_type> +template using IsMemcpyOk = - absl::conjunction>, - absl::is_trivially_copy_constructible, - absl::is_trivially_copy_assignable, - absl::is_trivially_destructible>; + absl::conjunction>>, + absl::is_trivially_copy_constructible>, + absl::is_trivially_copy_assignable>, + absl::is_trivially_destructible>>; -template -void DestroyElements(AllocatorType* alloc_ptr, Pointer destroy_first, - SizeType destroy_size) { - using AllocatorTraits = absl::allocator_traits; +template +struct TypeIdentity { + using type = T; +}; - if (destroy_first != nullptr) { - for (auto i = destroy_size; i != 0;) { +// Used for function arguments in template functions to prevent ADL by forcing +// callers to explicitly specify the template parameter. +template +using NoTypeDeduction = typename TypeIdentity::type; + +template >::value> +struct DestroyAdapter; + +template +struct DestroyAdapter { + static void DestroyElements(A& allocator, Pointer destroy_first, + SizeType destroy_size) { + for (SizeType i = destroy_size; i != 0;) { --i; - AllocatorTraits::destroy(*alloc_ptr, destroy_first + i); + AllocatorTraits::destroy(allocator, destroy_first + i); } - -#if !defined(NDEBUG) - { - using ValueType = typename AllocatorTraits::value_type; - - // Overwrite unused memory with `0xab` so we can catch uninitialized - // usage. - // - // Cast to `void*` to tell the compiler that we don't care that we might - // be scribbling on a vtable pointer. 
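The debug-only block being removed in this hunk (its body continues just below) captures a handy pattern by itself: after running the destructors, overwrite the raw storage with a known byte so later reads of destroyed elements fail loudly in debug builds. A standalone sketch of that pattern, using a made-up helper name:

#include <cstddef>
#include <cstring>

// Destroys [first, first + n) in reverse order and, in debug builds, scribbles
// 0xab over the vacated storage to surface use-after-destroy bugs.
template <typename T>
void DestroyAndScribble(T* first, size_t n) {
  for (size_t i = n; i != 0;) {
    --i;
    first[i].~T();
  }
#if !defined(NDEBUG)
  // Go through void*: the intent is to clobber everything, vtable pointers
  // included.
  std::memset(static_cast<void*>(first), 0xab, n * sizeof(T));
#endif
}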
- void* memory_ptr = destroy_first; - auto memory_size = destroy_size * sizeof(ValueType); - std::memset(memory_ptr, 0xab, memory_size); - } -#endif // !defined(NDEBUG) } -} +}; -template -void ConstructElements(AllocatorType* alloc_ptr, Pointer construct_first, - ValueAdapter* values_ptr, SizeType construct_size) { - for (SizeType i = 0; i < construct_size; ++i) { - ABSL_INTERNAL_TRY { - values_ptr->ConstructNext(alloc_ptr, construct_first + i); - } +template +struct DestroyAdapter { + static void DestroyElements(A& allocator, Pointer destroy_first, + SizeType destroy_size) { + static_cast(allocator); + static_cast(destroy_first); + static_cast(destroy_size); + } +}; + +template +struct Allocation { + Pointer data; + SizeType capacity; +}; + +template ) > ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT)> +struct MallocAdapter { + static Allocation Allocate(A& allocator, SizeType requested_capacity) { + return {AllocatorTraits::allocate(allocator, requested_capacity), + requested_capacity}; + } + + static void Deallocate(A& allocator, Pointer pointer, + SizeType capacity) { + AllocatorTraits::deallocate(allocator, pointer, capacity); + } +}; + +template +void ConstructElements(NoTypeDeduction& allocator, + Pointer construct_first, ValueAdapter& values, + SizeType construct_size) { + for (SizeType i = 0; i < construct_size; ++i) { + ABSL_INTERNAL_TRY { values.ConstructNext(allocator, construct_first + i); } ABSL_INTERNAL_CATCH_ANY { - inlined_vector_internal::DestroyElements(alloc_ptr, construct_first, i); + DestroyAdapter::DestroyElements(allocator, construct_first, i); ABSL_INTERNAL_RETHROW; } } } -template -void AssignElements(Pointer assign_first, ValueAdapter* values_ptr, - SizeType assign_size) { - for (SizeType i = 0; i < assign_size; ++i) { - values_ptr->AssignNext(assign_first + i); +template +void AssignElements(Pointer assign_first, ValueAdapter& values, + SizeType assign_size) { + for (SizeType i = 0; i < assign_size; ++i) { + values.AssignNext(assign_first + i); } } -template +template struct StorageView { - using AllocatorTraits = absl::allocator_traits; - using Pointer = typename AllocatorTraits::pointer; - using SizeType = typename AllocatorTraits::size_type; - - Pointer data; - SizeType size; - SizeType capacity; + Pointer data; + SizeType size; + SizeType capacity; }; -template +template class IteratorValueAdapter { - using AllocatorTraits = absl::allocator_traits; - using Pointer = typename AllocatorTraits::pointer; - public: explicit IteratorValueAdapter(const Iterator& it) : it_(it) {} - void ConstructNext(AllocatorType* alloc_ptr, Pointer construct_at) { - AllocatorTraits::construct(*alloc_ptr, construct_at, *it_); + void ConstructNext(A& allocator, Pointer construct_at) { + AllocatorTraits::construct(allocator, construct_at, *it_); ++it_; } - void AssignNext(Pointer assign_at) { + void AssignNext(Pointer assign_at) { *assign_at = *it_; ++it_; } @@ -131,166 +187,123 @@ class IteratorValueAdapter { Iterator it_; }; -template +template class CopyValueAdapter { - using AllocatorTraits = absl::allocator_traits; - using ValueType = typename AllocatorTraits::value_type; - using Pointer = typename AllocatorTraits::pointer; - using ConstPointer = typename AllocatorTraits::const_pointer; - public: - explicit CopyValueAdapter(const ValueType& v) : ptr_(std::addressof(v)) {} + explicit CopyValueAdapter(ConstPointer p) : ptr_(p) {} - void ConstructNext(AllocatorType* alloc_ptr, Pointer construct_at) { - AllocatorTraits::construct(*alloc_ptr, construct_at, *ptr_); + void ConstructNext(A& 
allocator, Pointer construct_at) { + AllocatorTraits::construct(allocator, construct_at, *ptr_); } - void AssignNext(Pointer assign_at) { *assign_at = *ptr_; } + void AssignNext(Pointer assign_at) { *assign_at = *ptr_; } private: - ConstPointer ptr_; + ConstPointer ptr_; }; -template +template class DefaultValueAdapter { - using AllocatorTraits = absl::allocator_traits; - using ValueType = typename AllocatorTraits::value_type; - using Pointer = typename AllocatorTraits::pointer; - public: explicit DefaultValueAdapter() {} - void ConstructNext(AllocatorType* alloc_ptr, Pointer construct_at) { - AllocatorTraits::construct(*alloc_ptr, construct_at); + void ConstructNext(A& allocator, Pointer construct_at) { + AllocatorTraits::construct(allocator, construct_at); } - void AssignNext(Pointer assign_at) { *assign_at = ValueType(); } + void AssignNext(Pointer assign_at) { *assign_at = ValueType(); } }; -template +template class AllocationTransaction { - using AllocatorTraits = absl::allocator_traits; - using Pointer = typename AllocatorTraits::pointer; - using SizeType = typename AllocatorTraits::size_type; - public: - explicit AllocationTransaction(AllocatorType* alloc_ptr) - : alloc_data_(*alloc_ptr, nullptr) {} + explicit AllocationTransaction(A& allocator) + : allocator_data_(allocator, nullptr), capacity_(0) {} ~AllocationTransaction() { if (DidAllocate()) { - AllocatorTraits::deallocate(GetAllocator(), GetData(), GetCapacity()); + MallocAdapter::Deallocate(GetAllocator(), GetData(), GetCapacity()); } } AllocationTransaction(const AllocationTransaction&) = delete; void operator=(const AllocationTransaction&) = delete; - AllocatorType& GetAllocator() { return alloc_data_.template get<0>(); } - Pointer& GetData() { return alloc_data_.template get<1>(); } - SizeType& GetCapacity() { return capacity_; } + A& GetAllocator() { return allocator_data_.template get<0>(); } + Pointer& GetData() { return allocator_data_.template get<1>(); } + SizeType& GetCapacity() { return capacity_; } bool DidAllocate() { return GetData() != nullptr; } - Pointer Allocate(SizeType capacity) { - GetData() = AllocatorTraits::allocate(GetAllocator(), capacity); - GetCapacity() = capacity; - return GetData(); + + Pointer Allocate(SizeType requested_capacity) { + Allocation result = + MallocAdapter::Allocate(GetAllocator(), requested_capacity); + GetData() = result.data; + GetCapacity() = result.capacity; + return result.data; } + ABSL_MUST_USE_RESULT Allocation Release() && { + Allocation result = {GetData(), GetCapacity()}; + Reset(); + return result; + } + + private: void Reset() { GetData() = nullptr; GetCapacity() = 0; } - private: - container_internal::CompressedTuple alloc_data_; - SizeType capacity_ = 0; + container_internal::CompressedTuple> allocator_data_; + SizeType capacity_; }; -template +template class ConstructionTransaction { - using AllocatorTraits = absl::allocator_traits; - using Pointer = typename AllocatorTraits::pointer; - using SizeType = typename AllocatorTraits::size_type; - public: - explicit ConstructionTransaction(AllocatorType* alloc_ptr) - : alloc_data_(*alloc_ptr, nullptr) {} + explicit ConstructionTransaction(A& allocator) + : allocator_data_(allocator, nullptr), size_(0) {} ~ConstructionTransaction() { if (DidConstruct()) { - inlined_vector_internal::DestroyElements(std::addressof(GetAllocator()), - GetData(), GetSize()); + DestroyAdapter::DestroyElements(GetAllocator(), GetData(), GetSize()); } } ConstructionTransaction(const ConstructionTransaction&) = delete; void operator=(const 
ConstructionTransaction&) = delete; - AllocatorType& GetAllocator() { return alloc_data_.template get<0>(); } - Pointer& GetData() { return alloc_data_.template get<1>(); } - SizeType& GetSize() { return size_; } + A& GetAllocator() { return allocator_data_.template get<0>(); } + Pointer& GetData() { return allocator_data_.template get<1>(); } + SizeType& GetSize() { return size_; } bool DidConstruct() { return GetData() != nullptr; } template - void Construct(Pointer data, ValueAdapter* values_ptr, SizeType size) { - inlined_vector_internal::ConstructElements(std::addressof(GetAllocator()), - data, values_ptr, size); + void Construct(Pointer data, ValueAdapter& values, SizeType size) { + ConstructElements(GetAllocator(), data, values, size); GetData() = data; GetSize() = size; } - void Commit() { + void Commit() && { GetData() = nullptr; GetSize() = 0; } private: - container_internal::CompressedTuple alloc_data_; - SizeType size_ = 0; + container_internal::CompressedTuple> allocator_data_; + SizeType size_; }; template class Storage { public: - using AllocatorTraits = absl::allocator_traits; - using allocator_type = typename AllocatorTraits::allocator_type; - using value_type = typename AllocatorTraits::value_type; - using pointer = typename AllocatorTraits::pointer; - using const_pointer = typename AllocatorTraits::const_pointer; - using size_type = typename AllocatorTraits::size_type; - using difference_type = typename AllocatorTraits::difference_type; - - using reference = value_type&; - using const_reference = const value_type&; - using RValueReference = value_type&&; - using iterator = pointer; - using const_iterator = const_pointer; - using reverse_iterator = std::reverse_iterator; - using const_reverse_iterator = std::reverse_iterator; - using MoveIterator = std::move_iterator; - using IsMemcpyOk = inlined_vector_internal::IsMemcpyOk; - - using StorageView = inlined_vector_internal::StorageView; - - template - using IteratorValueAdapter = - inlined_vector_internal::IteratorValueAdapter; - using CopyValueAdapter = - inlined_vector_internal::CopyValueAdapter; - using DefaultValueAdapter = - inlined_vector_internal::DefaultValueAdapter; - - using AllocationTransaction = - inlined_vector_internal::AllocationTransaction; - using ConstructionTransaction = - inlined_vector_internal::ConstructionTransaction; - - static size_type NextCapacity(size_type current_capacity) { + static SizeType NextCapacity(SizeType current_capacity) { return current_capacity * 2; } - static size_type ComputeCapacity(size_type current_capacity, - size_type requested_capacity) { + static SizeType ComputeCapacity(SizeType current_capacity, + SizeType requested_capacity) { return (std::max)(NextCapacity(current_capacity), requested_capacity); } @@ -298,140 +311,138 @@ class Storage { // Storage Constructors and Destructor // --------------------------------------------------------------------------- - Storage() : metadata_() {} + Storage() : metadata_(A(), /* size and is_allocated */ 0u) {} - explicit Storage(const allocator_type& alloc) : metadata_(alloc, {}) {} + explicit Storage(const A& allocator) + : metadata_(allocator, /* size and is_allocated */ 0u) {} ~Storage() { - pointer data = GetIsAllocated() ? GetAllocatedData() : GetInlinedData(); - inlined_vector_internal::DestroyElements(GetAllocPtr(), data, GetSize()); - DeallocateIfAllocated(); + if (GetSizeAndIsAllocated() == 0) { + // Empty and not allocated; nothing to do. 
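NextCapacity and ComputeCapacity above encode the growth rule: the backing store at least doubles, but never ends up smaller than what the caller explicitly asked for. The same arithmetic as a standalone sketch (NextCap and ComputeCap are hypothetical names):

#include <algorithm>
#include <cstddef>

constexpr size_t NextCap(size_t current) { return current * 2; }

// Doubling growth, clamped from below by the explicit request.
constexpr size_t ComputeCap(size_t current, size_t requested) {
  return std::max(NextCap(current), requested);
}

static_assert(ComputeCap(4, 5) == 8, "small request rides along with doubling");
static_assert(ComputeCap(4, 100) == 100, "large request wins over doubling");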
+ } else if (IsMemcpyOk::value) { + // No destructors need to be run; just deallocate if necessary. + DeallocateIfAllocated(); + } else { + DestroyContents(); + } } // --------------------------------------------------------------------------- // Storage Member Accessors // --------------------------------------------------------------------------- - size_type& GetSizeAndIsAllocated() { return metadata_.template get<1>(); } + SizeType& GetSizeAndIsAllocated() { return metadata_.template get<1>(); } - const size_type& GetSizeAndIsAllocated() const { + const SizeType& GetSizeAndIsAllocated() const { return metadata_.template get<1>(); } - size_type GetSize() const { return GetSizeAndIsAllocated() >> 1; } + SizeType GetSize() const { return GetSizeAndIsAllocated() >> 1; } bool GetIsAllocated() const { return GetSizeAndIsAllocated() & 1; } - pointer GetAllocatedData() { return data_.allocated.allocated_data; } + Pointer GetAllocatedData() { return data_.allocated.allocated_data; } - const_pointer GetAllocatedData() const { + ConstPointer GetAllocatedData() const { return data_.allocated.allocated_data; } - pointer GetInlinedData() { - return reinterpret_cast( + Pointer GetInlinedData() { + return reinterpret_cast>( std::addressof(data_.inlined.inlined_data[0])); } - const_pointer GetInlinedData() const { - return reinterpret_cast( + ConstPointer GetInlinedData() const { + return reinterpret_cast>( std::addressof(data_.inlined.inlined_data[0])); } - size_type GetAllocatedCapacity() const { + SizeType GetAllocatedCapacity() const { return data_.allocated.allocated_capacity; } - size_type GetInlinedCapacity() const { return static_cast(N); } + SizeType GetInlinedCapacity() const { return static_cast>(N); } - StorageView MakeStorageView() { - return GetIsAllocated() - ? StorageView{GetAllocatedData(), GetSize(), - GetAllocatedCapacity()} - : StorageView{GetInlinedData(), GetSize(), GetInlinedCapacity()}; + StorageView MakeStorageView() { + return GetIsAllocated() ? StorageView{GetAllocatedData(), GetSize(), + GetAllocatedCapacity()} + : StorageView{GetInlinedData(), GetSize(), + GetInlinedCapacity()}; } - allocator_type* GetAllocPtr() { - return std::addressof(metadata_.template get<0>()); - } + A& GetAllocator() { return metadata_.template get<0>(); } - const allocator_type* GetAllocPtr() const { - return std::addressof(metadata_.template get<0>()); - } + const A& GetAllocator() const { return metadata_.template get<0>(); } // --------------------------------------------------------------------------- // Storage Member Mutators // --------------------------------------------------------------------------- - template - void Initialize(ValueAdapter values, size_type new_size); + ABSL_ATTRIBUTE_NOINLINE void InitFrom(const Storage& other); template - void Assign(ValueAdapter values, size_type new_size); + void Initialize(ValueAdapter values, SizeType new_size); template - void Resize(ValueAdapter values, size_type new_size); + void Assign(ValueAdapter values, SizeType new_size); template - iterator Insert(const_iterator pos, ValueAdapter values, - size_type insert_count); + void Resize(ValueAdapter values, SizeType new_size); + + template + Iterator Insert(ConstIterator pos, ValueAdapter values, + SizeType insert_count); template - reference EmplaceBack(Args&&... args); + Reference EmplaceBack(Args&&... 
args); - iterator Erase(const_iterator from, const_iterator to); + Iterator Erase(ConstIterator from, ConstIterator to); - void Reserve(size_type requested_capacity); + void Reserve(SizeType requested_capacity); void ShrinkToFit(); void Swap(Storage* other_storage_ptr); void SetIsAllocated() { - GetSizeAndIsAllocated() |= static_cast(1); + GetSizeAndIsAllocated() |= static_cast>(1); } void UnsetIsAllocated() { - GetSizeAndIsAllocated() &= ((std::numeric_limits::max)() - 1); + GetSizeAndIsAllocated() &= ((std::numeric_limits>::max)() - 1); } - void SetSize(size_type size) { + void SetSize(SizeType size) { GetSizeAndIsAllocated() = - (size << 1) | static_cast(GetIsAllocated()); + (size << 1) | static_cast>(GetIsAllocated()); } - void SetAllocatedSize(size_type size) { - GetSizeAndIsAllocated() = (size << 1) | static_cast(1); + void SetAllocatedSize(SizeType size) { + GetSizeAndIsAllocated() = (size << 1) | static_cast>(1); } - void SetInlinedSize(size_type size) { - GetSizeAndIsAllocated() = size << static_cast(1); + void SetInlinedSize(SizeType size) { + GetSizeAndIsAllocated() = size << static_cast>(1); } - void AddSize(size_type count) { - GetSizeAndIsAllocated() += count << static_cast(1); + void AddSize(SizeType count) { + GetSizeAndIsAllocated() += count << static_cast>(1); } - void SubtractSize(size_type count) { - assert(count <= GetSize()); + void SubtractSize(SizeType count) { + ABSL_HARDENING_ASSERT(count <= GetSize()); - GetSizeAndIsAllocated() -= count << static_cast(1); + GetSizeAndIsAllocated() -= count << static_cast>(1); } - void SetAllocatedData(pointer data, size_type capacity) { - data_.allocated.allocated_data = data; - data_.allocated.allocated_capacity = capacity; - } - - void AcquireAllocatedData(AllocationTransaction* allocation_tx_ptr) { - SetAllocatedData(allocation_tx_ptr->GetData(), - allocation_tx_ptr->GetCapacity()); - - allocation_tx_ptr->Reset(); + void SetAllocation(Allocation allocation) { + data_.allocated.allocated_data = allocation.data; + data_.allocated.allocated_capacity = allocation.capacity; } void MemcpyFrom(const Storage& other_storage) { - assert(IsMemcpyOk::value || other_storage.GetIsAllocated()); + ABSL_HARDENING_ASSERT(IsMemcpyOk::value || + other_storage.GetIsAllocated()); GetSizeAndIsAllocated() = other_storage.GetSizeAndIsAllocated(); data_ = other_storage.data_; @@ -439,22 +450,23 @@ class Storage { void DeallocateIfAllocated() { if (GetIsAllocated()) { - AllocatorTraits::deallocate(*GetAllocPtr(), GetAllocatedData(), - GetAllocatedCapacity()); + MallocAdapter::Deallocate(GetAllocator(), GetAllocatedData(), + GetAllocatedCapacity()); } } private: - using Metadata = - container_internal::CompressedTuple; + ABSL_ATTRIBUTE_NOINLINE void DestroyContents(); + + using Metadata = container_internal::CompressedTuple>; struct Allocated { - pointer allocated_data; - size_type allocated_capacity; + Pointer allocated_data; + SizeType allocated_capacity; }; struct Inlined { - alignas(value_type) char inlined_data[sizeof(value_type[N])]; + alignas(ValueType) char inlined_data[sizeof(ValueType[N])]; }; union Data { @@ -462,33 +474,75 @@ class Storage { Inlined inlined; }; + template + ABSL_ATTRIBUTE_NOINLINE Reference EmplaceBackSlow(Args&&... args); + Metadata metadata_; Data data_; }; +template +void Storage::DestroyContents() { + Pointer data = GetIsAllocated() ? 
GetAllocatedData() : GetInlinedData(); + DestroyAdapter::DestroyElements(GetAllocator(), data, GetSize()); + DeallocateIfAllocated(); +} + +template +void Storage::InitFrom(const Storage& other) { + const SizeType n = other.GetSize(); + ABSL_HARDENING_ASSERT(n > 0); // Empty sources handled handled in caller. + ConstPointer src; + Pointer dst; + if (!other.GetIsAllocated()) { + dst = GetInlinedData(); + src = other.GetInlinedData(); + } else { + // Because this is only called from the `InlinedVector` constructors, it's + // safe to take on the allocation with size `0`. If `ConstructElements(...)` + // throws, deallocation will be automatically handled by `~Storage()`. + SizeType requested_capacity = ComputeCapacity(GetInlinedCapacity(), n); + Allocation allocation = + MallocAdapter::Allocate(GetAllocator(), requested_capacity); + SetAllocation(allocation); + dst = allocation.data; + src = other.GetAllocatedData(); + } + if (IsMemcpyOk::value) { + std::memcpy(reinterpret_cast(dst), + reinterpret_cast(src), n * sizeof(ValueType)); + } else { + auto values = IteratorValueAdapter>(src); + ConstructElements(GetAllocator(), dst, values, n); + } + GetSizeAndIsAllocated() = other.GetSizeAndIsAllocated(); +} + template template -auto Storage::Initialize(ValueAdapter values, size_type new_size) +auto Storage::Initialize(ValueAdapter values, SizeType new_size) -> void { // Only callable from constructors! - assert(!GetIsAllocated()); - assert(GetSize() == 0); + ABSL_HARDENING_ASSERT(!GetIsAllocated()); + ABSL_HARDENING_ASSERT(GetSize() == 0); - pointer construct_data; + Pointer construct_data; if (new_size > GetInlinedCapacity()) { // Because this is only called from the `InlinedVector` constructors, it's // safe to take on the allocation with size `0`. If `ConstructElements(...)` // throws, deallocation will be automatically handled by `~Storage()`. 
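InitFrom above chooses between a raw memcpy and element-by-element construction based on an IsMemcpyOk-style trait (trivially copyable value type with the default allocator). The same dispatch reduced to a standalone helper; CopyInit is a made-up name, and the check here is only the trivially-copyable half of what the library's trait tests:

#include <cstddef>
#include <cstring>
#include <new>
#include <type_traits>

// Copies n elements from src into uninitialized storage at dst.
template <typename T>
void CopyInit(T* dst, const T* src, size_t n) {
  if constexpr (std::is_trivially_copyable<T>::value) {
    // Fast path: bytes are the whole story for trivially copyable types.
    std::memcpy(static_cast<void*>(dst), static_cast<const void*>(src),
                n * sizeof(T));
  } else {
    // Slow path: copy-construct each element in place.
    for (size_t i = 0; i < n; ++i) {
      ::new (static_cast<void*>(dst + i)) T(src[i]);
    }
  }
}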
- size_type new_capacity = ComputeCapacity(GetInlinedCapacity(), new_size); - construct_data = AllocatorTraits::allocate(*GetAllocPtr(), new_capacity); - SetAllocatedData(construct_data, new_capacity); + SizeType requested_capacity = + ComputeCapacity(GetInlinedCapacity(), new_size); + Allocation allocation = + MallocAdapter::Allocate(GetAllocator(), requested_capacity); + construct_data = allocation.data; + SetAllocation(allocation); SetIsAllocated(); } else { construct_data = GetInlinedData(); } - inlined_vector_internal::ConstructElements(GetAllocPtr(), construct_data, - &values, new_size); + ConstructElements(GetAllocator(), construct_data, values, new_size); // Since the initial size was guaranteed to be `0` and the allocated bit is // already correct for either case, *adding* `new_size` gives us the correct @@ -498,18 +552,20 @@ auto Storage::Initialize(ValueAdapter values, size_type new_size) template template -auto Storage::Assign(ValueAdapter values, size_type new_size) -> void { - StorageView storage_view = MakeStorageView(); +auto Storage::Assign(ValueAdapter values, SizeType new_size) + -> void { + StorageView storage_view = MakeStorageView(); - AllocationTransaction allocation_tx(GetAllocPtr()); + AllocationTransaction allocation_tx(GetAllocator()); - absl::Span assign_loop; - absl::Span construct_loop; - absl::Span destroy_loop; + absl::Span> assign_loop; + absl::Span> construct_loop; + absl::Span> destroy_loop; if (new_size > storage_view.capacity) { - size_type new_capacity = ComputeCapacity(storage_view.capacity, new_size); - construct_loop = {allocation_tx.Allocate(new_capacity), new_size}; + SizeType requested_capacity = + ComputeCapacity(storage_view.capacity, new_size); + construct_loop = {allocation_tx.Allocate(requested_capacity), new_size}; destroy_loop = {storage_view.data, storage_view.size}; } else if (new_size > storage_view.size) { assign_loop = {storage_view.data, storage_view.size}; @@ -520,18 +576,17 @@ auto Storage::Assign(ValueAdapter values, size_type new_size) -> void { destroy_loop = {storage_view.data + new_size, storage_view.size - new_size}; } - inlined_vector_internal::AssignElements(assign_loop.data(), &values, - assign_loop.size()); + AssignElements(assign_loop.data(), values, assign_loop.size()); - inlined_vector_internal::ConstructElements( - GetAllocPtr(), construct_loop.data(), &values, construct_loop.size()); + ConstructElements(GetAllocator(), construct_loop.data(), values, + construct_loop.size()); - inlined_vector_internal::DestroyElements(GetAllocPtr(), destroy_loop.data(), - destroy_loop.size()); + DestroyAdapter::DestroyElements(GetAllocator(), destroy_loop.data(), + destroy_loop.size()); if (allocation_tx.DidAllocate()) { DeallocateIfAllocated(); - AcquireAllocatedData(&allocation_tx); + SetAllocation(std::move(allocation_tx).Release()); SetIsAllocated(); } @@ -540,125 +595,120 @@ auto Storage::Assign(ValueAdapter values, size_type new_size) -> void { template template -auto Storage::Resize(ValueAdapter values, size_type new_size) -> void { - StorageView storage_view = MakeStorageView(); - - IteratorValueAdapter move_values( - MoveIterator(storage_view.data)); - - AllocationTransaction allocation_tx(GetAllocPtr()); - ConstructionTransaction construction_tx(GetAllocPtr()); - - absl::Span construct_loop; - absl::Span move_construct_loop; - absl::Span destroy_loop; - - if (new_size > storage_view.capacity) { - size_type new_capacity = ComputeCapacity(storage_view.capacity, new_size); - pointer new_data = 
allocation_tx.Allocate(new_capacity); - construct_loop = {new_data + storage_view.size, - new_size - storage_view.size}; - move_construct_loop = {new_data, storage_view.size}; - destroy_loop = {storage_view.data, storage_view.size}; - } else if (new_size > storage_view.size) { - construct_loop = {storage_view.data + storage_view.size, - new_size - storage_view.size}; +auto Storage::Resize(ValueAdapter values, SizeType new_size) + -> void { + StorageView storage_view = MakeStorageView(); + Pointer const base = storage_view.data; + const SizeType size = storage_view.size; + A& alloc = GetAllocator(); + if (new_size <= size) { + // Destroy extra old elements. + DestroyAdapter::DestroyElements(alloc, base + new_size, size - new_size); + } else if (new_size <= storage_view.capacity) { + // Construct new elements in place. + ConstructElements(alloc, base + size, values, new_size - size); } else { - destroy_loop = {storage_view.data + new_size, storage_view.size - new_size}; - } + // Steps: + // a. Allocate new backing store. + // b. Construct new elements in new backing store. + // c. Move existing elements from old backing store to now. + // d. Destroy all elements in old backing store. + // Use transactional wrappers for the first two steps so we can roll + // back if necessary due to exceptions. + AllocationTransaction allocation_tx(alloc); + SizeType requested_capacity = + ComputeCapacity(storage_view.capacity, new_size); + Pointer new_data = allocation_tx.Allocate(requested_capacity); - construction_tx.Construct(construct_loop.data(), &values, - construct_loop.size()); + ConstructionTransaction construction_tx(alloc); + construction_tx.Construct(new_data + size, values, new_size - size); - inlined_vector_internal::ConstructElements( - GetAllocPtr(), move_construct_loop.data(), &move_values, - move_construct_loop.size()); + IteratorValueAdapter> move_values( + (MoveIterator(base))); + ConstructElements(alloc, new_data, move_values, size); - inlined_vector_internal::DestroyElements(GetAllocPtr(), destroy_loop.data(), - destroy_loop.size()); - - construction_tx.Commit(); - if (allocation_tx.DidAllocate()) { + DestroyAdapter::DestroyElements(alloc, base, size); + std::move(construction_tx).Commit(); DeallocateIfAllocated(); - AcquireAllocatedData(&allocation_tx); + SetAllocation(std::move(allocation_tx).Release()); SetIsAllocated(); } - SetSize(new_size); } template template -auto Storage::Insert(const_iterator pos, ValueAdapter values, - size_type insert_count) -> iterator { - StorageView storage_view = MakeStorageView(); +auto Storage::Insert(ConstIterator pos, ValueAdapter values, + SizeType insert_count) -> Iterator { + StorageView storage_view = MakeStorageView(); - size_type insert_index = - std::distance(const_iterator(storage_view.data), pos); - size_type insert_end_index = insert_index + insert_count; - size_type new_size = storage_view.size + insert_count; + SizeType insert_index = + std::distance(ConstIterator(storage_view.data), pos); + SizeType insert_end_index = insert_index + insert_count; + SizeType new_size = storage_view.size + insert_count; if (new_size > storage_view.capacity) { - AllocationTransaction allocation_tx(GetAllocPtr()); - ConstructionTransaction construction_tx(GetAllocPtr()); - ConstructionTransaction move_construciton_tx(GetAllocPtr()); + AllocationTransaction allocation_tx(GetAllocator()); + ConstructionTransaction construction_tx(GetAllocator()); + ConstructionTransaction move_construction_tx(GetAllocator()); - IteratorValueAdapter move_values( - 
MoveIterator(storage_view.data)); + IteratorValueAdapter> move_values( + MoveIterator(storage_view.data)); - size_type new_capacity = ComputeCapacity(storage_view.capacity, new_size); - pointer new_data = allocation_tx.Allocate(new_capacity); + SizeType requested_capacity = + ComputeCapacity(storage_view.capacity, new_size); + Pointer new_data = allocation_tx.Allocate(requested_capacity); - construction_tx.Construct(new_data + insert_index, &values, insert_count); + construction_tx.Construct(new_data + insert_index, values, insert_count); - move_construciton_tx.Construct(new_data, &move_values, insert_index); + move_construction_tx.Construct(new_data, move_values, insert_index); - inlined_vector_internal::ConstructElements( - GetAllocPtr(), new_data + insert_end_index, &move_values, - storage_view.size - insert_index); + ConstructElements(GetAllocator(), new_data + insert_end_index, + move_values, storage_view.size - insert_index); - inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data, - storage_view.size); + DestroyAdapter::DestroyElements(GetAllocator(), storage_view.data, + storage_view.size); - construction_tx.Commit(); - move_construciton_tx.Commit(); + std::move(construction_tx).Commit(); + std::move(move_construction_tx).Commit(); DeallocateIfAllocated(); - AcquireAllocatedData(&allocation_tx); + SetAllocation(std::move(allocation_tx).Release()); SetAllocatedSize(new_size); - return iterator(new_data + insert_index); + return Iterator(new_data + insert_index); } else { - size_type move_construction_destination_index = + SizeType move_construction_destination_index = (std::max)(insert_end_index, storage_view.size); - ConstructionTransaction move_construction_tx(GetAllocPtr()); + ConstructionTransaction move_construction_tx(GetAllocator()); - IteratorValueAdapter move_construction_values( - MoveIterator(storage_view.data + - (move_construction_destination_index - insert_count))); - absl::Span move_construction = { + IteratorValueAdapter> move_construction_values( + MoveIterator(storage_view.data + + (move_construction_destination_index - insert_count))); + absl::Span> move_construction = { storage_view.data + move_construction_destination_index, new_size - move_construction_destination_index}; - pointer move_assignment_values = storage_view.data + insert_index; - absl::Span move_assignment = { + Pointer move_assignment_values = storage_view.data + insert_index; + absl::Span> move_assignment = { storage_view.data + insert_end_index, move_construction_destination_index - insert_end_index}; - absl::Span insert_assignment = {move_assignment_values, - move_construction.size()}; + absl::Span> insert_assignment = {move_assignment_values, + move_construction.size()}; - absl::Span insert_construction = { + absl::Span> insert_construction = { insert_assignment.data() + insert_assignment.size(), insert_count - insert_assignment.size()}; move_construction_tx.Construct(move_construction.data(), - &move_construction_values, + move_construction_values, move_construction.size()); - for (pointer destination = move_assignment.data() + move_assignment.size(), - last_destination = move_assignment.data(), - source = move_assignment_values + move_assignment.size(); + for (Pointer + destination = move_assignment.data() + move_assignment.size(), + last_destination = move_assignment.data(), + source = move_assignment_values + move_assignment.size(); ;) { --destination; --source; @@ -666,157 +716,164 @@ auto Storage::Insert(const_iterator pos, ValueAdapter values, *destination = 
std::move(*source); } - inlined_vector_internal::AssignElements(insert_assignment.data(), &values, - insert_assignment.size()); + AssignElements(insert_assignment.data(), values, + insert_assignment.size()); - inlined_vector_internal::ConstructElements( - GetAllocPtr(), insert_construction.data(), &values, - insert_construction.size()); + ConstructElements(GetAllocator(), insert_construction.data(), values, + insert_construction.size()); - move_construction_tx.Commit(); + std::move(move_construction_tx).Commit(); AddSize(insert_count); - return iterator(storage_view.data + insert_index); + return Iterator(storage_view.data + insert_index); } } template template -auto Storage::EmplaceBack(Args&&... args) -> reference { - StorageView storage_view = MakeStorageView(); - - AllocationTransaction allocation_tx(GetAllocPtr()); - - IteratorValueAdapter move_values( - MoveIterator(storage_view.data)); - - pointer construct_data; - if (storage_view.size == storage_view.capacity) { - size_type new_capacity = NextCapacity(storage_view.capacity); - construct_data = allocation_tx.Allocate(new_capacity); - } else { - construct_data = storage_view.data; +auto Storage::EmplaceBack(Args&&... args) -> Reference { + StorageView storage_view = MakeStorageView(); + const SizeType n = storage_view.size; + if (ABSL_PREDICT_TRUE(n != storage_view.capacity)) { + // Fast path; new element fits. + Pointer last_ptr = storage_view.data + n; + AllocatorTraits::construct(GetAllocator(), last_ptr, + std::forward(args)...); + AddSize(1); + return *last_ptr; } + // TODO(b/173712035): Annotate with musttail attribute to prevent regression. + return EmplaceBackSlow(std::forward(args)...); +} - pointer last_ptr = construct_data + storage_view.size; +template +template +auto Storage::EmplaceBackSlow(Args&&... args) -> Reference { + StorageView storage_view = MakeStorageView(); + AllocationTransaction allocation_tx(GetAllocator()); + IteratorValueAdapter> move_values( + MoveIterator(storage_view.data)); + SizeType requested_capacity = NextCapacity(storage_view.capacity); + Pointer construct_data = allocation_tx.Allocate(requested_capacity); + Pointer last_ptr = construct_data + storage_view.size; - AllocatorTraits::construct(*GetAllocPtr(), last_ptr, - std::forward(args)...); - - if (allocation_tx.DidAllocate()) { - ABSL_INTERNAL_TRY { - inlined_vector_internal::ConstructElements( - GetAllocPtr(), allocation_tx.GetData(), &move_values, - storage_view.size); - } - ABSL_INTERNAL_CATCH_ANY { - AllocatorTraits::destroy(*GetAllocPtr(), last_ptr); - ABSL_INTERNAL_RETHROW; - } - - inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data, - storage_view.size); - - DeallocateIfAllocated(); - AcquireAllocatedData(&allocation_tx); - SetIsAllocated(); + // Construct new element. + AllocatorTraits::construct(GetAllocator(), last_ptr, + std::forward(args)...); + // Move elements from old backing store to new backing store. + ABSL_INTERNAL_TRY { + ConstructElements(GetAllocator(), allocation_tx.GetData(), move_values, + storage_view.size); } + ABSL_INTERNAL_CATCH_ANY { + AllocatorTraits::destroy(GetAllocator(), last_ptr); + ABSL_INTERNAL_RETHROW; + } + // Destroy elements in old backing store. 
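EmplaceBackSlow gets its exception safety from AllocationTransaction: the freshly allocated buffer stays owned by the transaction until every step that can throw has finished, so a throwing move constructor simply lets the transaction's destructor return the memory. The idiom in isolation, as a sketch with illustrative names (AllocGuard is not the library class):

#include <cstddef>
#include <memory>

// Owns a raw allocation until Release() is called; otherwise deallocates.
template <typename T, typename Alloc = std::allocator<T>>
class AllocGuard {
 public:
  AllocGuard(Alloc& a, size_t n) : alloc_(a), n_(n), p_(a.allocate(n)) {}
  ~AllocGuard() {
    if (p_ != nullptr) alloc_.deallocate(p_, n_);
  }
  AllocGuard(const AllocGuard&) = delete;
  AllocGuard& operator=(const AllocGuard&) = delete;

  T* get() const { return p_; }

  // Hands ownership to the caller; the destructor then does nothing.
  T* Release() {
    T* p = p_;
    p_ = nullptr;
    return p;
  }

 private:
  Alloc& alloc_;
  size_t n_;
  T* p_;
};

The caller constructs into get(), and only once every potentially throwing step has succeeded does it take the pointer with Release() and tear down the old buffer, which is the ordering the surrounding code follows.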
+ DestroyAdapter::DestroyElements(GetAllocator(), storage_view.data, + storage_view.size); + DeallocateIfAllocated(); + SetAllocation(std::move(allocation_tx).Release()); + SetIsAllocated(); AddSize(1); return *last_ptr; } template -auto Storage::Erase(const_iterator from, const_iterator to) - -> iterator { - StorageView storage_view = MakeStorageView(); +auto Storage::Erase(ConstIterator from, ConstIterator to) + -> Iterator { + StorageView storage_view = MakeStorageView(); - size_type erase_size = std::distance(from, to); - size_type erase_index = - std::distance(const_iterator(storage_view.data), from); - size_type erase_end_index = erase_index + erase_size; + SizeType erase_size = std::distance(from, to); + SizeType erase_index = + std::distance(ConstIterator(storage_view.data), from); + SizeType erase_end_index = erase_index + erase_size; - IteratorValueAdapter move_values( - MoveIterator(storage_view.data + erase_end_index)); + IteratorValueAdapter> move_values( + MoveIterator(storage_view.data + erase_end_index)); - inlined_vector_internal::AssignElements(storage_view.data + erase_index, - &move_values, - storage_view.size - erase_end_index); + AssignElements(storage_view.data + erase_index, move_values, + storage_view.size - erase_end_index); - inlined_vector_internal::DestroyElements( - GetAllocPtr(), storage_view.data + (storage_view.size - erase_size), + DestroyAdapter::DestroyElements( + GetAllocator(), storage_view.data + (storage_view.size - erase_size), erase_size); SubtractSize(erase_size); - return iterator(storage_view.data + erase_index); + return Iterator(storage_view.data + erase_index); } template -auto Storage::Reserve(size_type requested_capacity) -> void { - StorageView storage_view = MakeStorageView(); +auto Storage::Reserve(SizeType requested_capacity) -> void { + StorageView storage_view = MakeStorageView(); if (ABSL_PREDICT_FALSE(requested_capacity <= storage_view.capacity)) return; - AllocationTransaction allocation_tx(GetAllocPtr()); + AllocationTransaction allocation_tx(GetAllocator()); - IteratorValueAdapter move_values( - MoveIterator(storage_view.data)); + IteratorValueAdapter> move_values( + MoveIterator(storage_view.data)); - size_type new_capacity = + SizeType new_requested_capacity = ComputeCapacity(storage_view.capacity, requested_capacity); - pointer new_data = allocation_tx.Allocate(new_capacity); + Pointer new_data = allocation_tx.Allocate(new_requested_capacity); - inlined_vector_internal::ConstructElements(GetAllocPtr(), new_data, - &move_values, storage_view.size); + ConstructElements(GetAllocator(), new_data, move_values, + storage_view.size); - inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data, - storage_view.size); + DestroyAdapter::DestroyElements(GetAllocator(), storage_view.data, + storage_view.size); DeallocateIfAllocated(); - AcquireAllocatedData(&allocation_tx); + SetAllocation(std::move(allocation_tx).Release()); SetIsAllocated(); } template auto Storage::ShrinkToFit() -> void { // May only be called on allocated instances! 
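Erase above never reallocates: it move-assigns the tail of the sequence down over the erased range and then destroys the slots left over at the end, which is what the AssignElements/DestroyElements pair does. The same algorithm on a plain buffer, as a sketch (EraseRange is a made-up helper):

#include <cstddef>
#include <utility>

// Erases [first, first + count) from a buffer holding `size` constructed Ts by
// shifting the tail left and destroying the vacated tail slots.
// Returns the new logical size.
template <typename T>
size_t EraseRange(T* data, size_t size, size_t first, size_t count) {
  for (size_t i = first; i + count < size; ++i) {
    data[i] = std::move(data[i + count]);  // move-assign the tail downwards
  }
  for (size_t i = size - count; i < size; ++i) {
    data[i].~T();  // destroy what the shift left behind
  }
  return size - count;
}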
- assert(GetIsAllocated()); + ABSL_HARDENING_ASSERT(GetIsAllocated()); - StorageView storage_view{GetAllocatedData(), GetSize(), - GetAllocatedCapacity()}; + StorageView storage_view{GetAllocatedData(), GetSize(), + GetAllocatedCapacity()}; if (ABSL_PREDICT_FALSE(storage_view.size == storage_view.capacity)) return; - AllocationTransaction allocation_tx(GetAllocPtr()); + AllocationTransaction allocation_tx(GetAllocator()); - IteratorValueAdapter move_values( - MoveIterator(storage_view.data)); + IteratorValueAdapter> move_values( + MoveIterator(storage_view.data)); - pointer construct_data; + Pointer construct_data; if (storage_view.size > GetInlinedCapacity()) { - size_type new_capacity = storage_view.size; - construct_data = allocation_tx.Allocate(new_capacity); + SizeType requested_capacity = storage_view.size; + construct_data = allocation_tx.Allocate(requested_capacity); + if (allocation_tx.GetCapacity() >= storage_view.capacity) { + // Already using the smallest available heap allocation. + return; + } } else { construct_data = GetInlinedData(); } ABSL_INTERNAL_TRY { - inlined_vector_internal::ConstructElements(GetAllocPtr(), construct_data, - &move_values, storage_view.size); + ConstructElements(GetAllocator(), construct_data, move_values, + storage_view.size); } ABSL_INTERNAL_CATCH_ANY { - SetAllocatedData(storage_view.data, storage_view.capacity); + SetAllocation({storage_view.data, storage_view.capacity}); ABSL_INTERNAL_RETHROW; } - inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data, - storage_view.size); + DestroyAdapter::DestroyElements(GetAllocator(), storage_view.data, + storage_view.size); - AllocatorTraits::deallocate(*GetAllocPtr(), storage_view.data, - storage_view.capacity); + MallocAdapter::Deallocate(GetAllocator(), storage_view.data, + storage_view.capacity); if (allocation_tx.DidAllocate()) { - AcquireAllocatedData(&allocation_tx); + SetAllocation(std::move(allocation_tx).Release()); } else { UnsetIsAllocated(); } @@ -825,7 +882,7 @@ auto Storage::ShrinkToFit() -> void { template auto Storage::Swap(Storage* other_storage_ptr) -> void { using std::swap; - assert(this != other_storage_ptr); + ABSL_HARDENING_ASSERT(this != other_storage_ptr); if (GetIsAllocated() && other_storage_ptr->GetIsAllocated()) { swap(data_.allocated, other_storage_ptr->data_.allocated); @@ -834,20 +891,20 @@ auto Storage::Swap(Storage* other_storage_ptr) -> void { Storage* large_ptr = other_storage_ptr; if (small_ptr->GetSize() > large_ptr->GetSize()) swap(small_ptr, large_ptr); - for (size_type i = 0; i < small_ptr->GetSize(); ++i) { + for (SizeType i = 0; i < small_ptr->GetSize(); ++i) { swap(small_ptr->GetInlinedData()[i], large_ptr->GetInlinedData()[i]); } - IteratorValueAdapter move_values( - MoveIterator(large_ptr->GetInlinedData() + small_ptr->GetSize())); + IteratorValueAdapter> move_values( + MoveIterator(large_ptr->GetInlinedData() + small_ptr->GetSize())); - inlined_vector_internal::ConstructElements( - large_ptr->GetAllocPtr(), - small_ptr->GetInlinedData() + small_ptr->GetSize(), &move_values, - large_ptr->GetSize() - small_ptr->GetSize()); + ConstructElements(large_ptr->GetAllocator(), + small_ptr->GetInlinedData() + small_ptr->GetSize(), + move_values, + large_ptr->GetSize() - small_ptr->GetSize()); - inlined_vector_internal::DestroyElements( - large_ptr->GetAllocPtr(), + DestroyAdapter::DestroyElements( + large_ptr->GetAllocator(), large_ptr->GetInlinedData() + small_ptr->GetSize(), large_ptr->GetSize() - small_ptr->GetSize()); } else { @@ -855,36 +912,41 
@@ auto Storage::Swap(Storage* other_storage_ptr) -> void { Storage* inlined_ptr = other_storage_ptr; if (!allocated_ptr->GetIsAllocated()) swap(allocated_ptr, inlined_ptr); - StorageView allocated_storage_view{allocated_ptr->GetAllocatedData(), - allocated_ptr->GetSize(), - allocated_ptr->GetAllocatedCapacity()}; + StorageView allocated_storage_view{ + allocated_ptr->GetAllocatedData(), allocated_ptr->GetSize(), + allocated_ptr->GetAllocatedCapacity()}; - IteratorValueAdapter move_values( - MoveIterator(inlined_ptr->GetInlinedData())); + IteratorValueAdapter> move_values( + MoveIterator(inlined_ptr->GetInlinedData())); ABSL_INTERNAL_TRY { - inlined_vector_internal::ConstructElements( - inlined_ptr->GetAllocPtr(), allocated_ptr->GetInlinedData(), - &move_values, inlined_ptr->GetSize()); + ConstructElements(inlined_ptr->GetAllocator(), + allocated_ptr->GetInlinedData(), move_values, + inlined_ptr->GetSize()); } ABSL_INTERNAL_CATCH_ANY { - allocated_ptr->SetAllocatedData(allocated_storage_view.data, - allocated_storage_view.capacity); + allocated_ptr->SetAllocation(Allocation{ + allocated_storage_view.data, allocated_storage_view.capacity}); ABSL_INTERNAL_RETHROW; } - inlined_vector_internal::DestroyElements(inlined_ptr->GetAllocPtr(), - inlined_ptr->GetInlinedData(), - inlined_ptr->GetSize()); + DestroyAdapter::DestroyElements(inlined_ptr->GetAllocator(), + inlined_ptr->GetInlinedData(), + inlined_ptr->GetSize()); - inlined_ptr->SetAllocatedData(allocated_storage_view.data, - allocated_storage_view.capacity); + inlined_ptr->SetAllocation(Allocation{allocated_storage_view.data, + allocated_storage_view.capacity}); } swap(GetSizeAndIsAllocated(), other_storage_ptr->GetSizeAndIsAllocated()); - swap(*GetAllocPtr(), *other_storage_ptr->GetAllocPtr()); + swap(GetAllocator(), other_storage_ptr->GetAllocator()); } +// End ignore "array-bounds" and "maybe-uninitialized" +#if !defined(__clang__) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif + } // namespace inlined_vector_internal ABSL_NAMESPACE_END } // namespace absl diff --git a/TMessagesProj/jni/voip/webrtc/absl/container/internal/layout.h b/TMessagesProj/jni/voip/webrtc/absl/container/internal/layout.h index 69cc85dd6..a59a24305 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/container/internal/layout.h +++ b/TMessagesProj/jni/voip/webrtc/absl/container/internal/layout.h @@ -163,6 +163,7 @@ #include #include #include + #include #include #include @@ -170,15 +171,16 @@ #include #include -#ifdef ADDRESS_SANITIZER -#include -#endif - +#include "absl/base/config.h" #include "absl/meta/type_traits.h" #include "absl/strings/str_cat.h" #include "absl/types/span.h" #include "absl/utility/utility.h" +#ifdef ABSL_HAVE_ADDRESS_SANITIZER +#include +#endif + #if defined(__GXX_RTTI) #define ABSL_INTERNAL_HAS_CXA_DEMANGLE #endif @@ -402,7 +404,7 @@ class LayoutImpl, absl::index_sequence, constexpr size_t Offset() const { static_assert(N < NumOffsets, "Index out of bounds"); return adl_barrier::Align( - Offset() + SizeOf>() * size_[N - 1], + Offset() + SizeOf>::value * size_[N - 1], ElementAlignment::value); } @@ -595,7 +597,7 @@ class LayoutImpl, absl::index_sequence, constexpr size_t AllocSize() const { static_assert(NumTypes == NumSizes, "You must specify sizes of all fields"); return Offset() + - SizeOf>() * size_[NumTypes - 1]; + SizeOf>::value * size_[NumTypes - 1]; } // If built with --config=asan, poisons padding bytes (if any) in the @@ -614,12 +616,12 @@ class LayoutImpl, absl::index_sequence, void PoisonPadding(const Char* p) const { 
static_assert(N < NumOffsets, "Index out of bounds"); (void)p; -#ifdef ADDRESS_SANITIZER +#ifdef ABSL_HAVE_ADDRESS_SANITIZER PoisonPadding(p); // The `if` is an optimization. It doesn't affect the observable behaviour. if (ElementAlignment::value % ElementAlignment::value) { size_t start = - Offset() + SizeOf>() * size_[N - 1]; + Offset() + SizeOf>::value * size_[N - 1]; ASAN_POISON_MEMORY_REGION(p + start, Offset() - start); } #endif @@ -643,7 +645,7 @@ class LayoutImpl, absl::index_sequence, // produce "unsigned*" where another produces "unsigned int *". std::string DebugString() const { const auto offsets = Offsets(); - const size_t sizes[] = {SizeOf>()...}; + const size_t sizes[] = {SizeOf>::value...}; const std::string types[] = { adl_barrier::TypeName>()...}; std::string res = absl::StrCat("@0", types[0], "(", sizes[0], ")"); diff --git a/TMessagesProj/jni/voip/webrtc/absl/container/internal/layout_benchmark.cc b/TMessagesProj/jni/voip/webrtc/absl/container/internal/layout_benchmark.cc new file mode 100644 index 000000000..d8636e8d5 --- /dev/null +++ b/TMessagesProj/jni/voip/webrtc/absl/container/internal/layout_benchmark.cc @@ -0,0 +1,122 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Every benchmark should have the same performance as the corresponding +// headroom benchmark. + +#include "absl/base/internal/raw_logging.h" +#include "absl/container/internal/layout.h" +#include "benchmark/benchmark.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +namespace { + +using ::benchmark::DoNotOptimize; + +using Int128 = int64_t[2]; + +// This benchmark provides the upper bound on performance for BM_OffsetConstant. +template +void BM_OffsetConstantHeadroom(benchmark::State& state) { + for (auto _ : state) { + DoNotOptimize(Offset); + } +} + +template +void BM_OffsetConstant(benchmark::State& state) { + using L = Layout; + ABSL_RAW_CHECK(L::Partial(3, 5, 7).template Offset<3>() == Offset, + "Invalid offset"); + for (auto _ : state) { + DoNotOptimize(L::Partial(3, 5, 7).template Offset<3>()); + } +} + +template +size_t VariableOffset(size_t n, size_t m, size_t k); + +template <> +size_t VariableOffset(size_t n, size_t m, + size_t k) { + auto Align = [](size_t n, size_t m) { return (n + m - 1) & ~(m - 1); }; + return Align(Align(Align(n * 1, 2) + m * 2, 4) + k * 4, 8); +} + +template <> +size_t VariableOffset(size_t n, size_t m, + size_t k) { + // No alignment is necessary. + return n * 16 + m * 4 + k * 2; +} + +// This benchmark provides the upper bound on performance for BM_OffsetVariable. 
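VariableOffset above is repeated application of the usual power-of-two alignment bump, Align(n, a) = (n + a - 1) & ~(a - 1). Working that through for the two layouts these benchmarks exercise (element sizes/alignments 1/1, 2/2, 4/4 and 16/8 for Int128) reproduces the offsets the benchmarks hard-code:

#include <cstddef>

// Rounds n up to the next multiple of the power-of-two alignment a.
constexpr size_t Align(size_t n, size_t a) { return (n + a - 1) & ~(a - 1); }

// Padded layout int8_t[3], int16_t[5], int32_t[7], Int128[...]: padding is
// inserted ahead of the int16_t, int32_t and Int128 arrays.
static_assert(Align(Align(Align(3 * 1, 2) + 5 * 2, 4) + 7 * 4, 8) == 48, "");

// Unpadded layout Int128[3], int32_t[5], int16_t[7], int8_t[...]: alignments
// only decrease, so the offset is a plain sum of the array sizes.
static_assert(3 * 16 + 5 * 4 + 7 * 2 == 82, "");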
+template +void BM_OffsetVariableHeadroom(benchmark::State& state) { + size_t n = 3; + size_t m = 5; + size_t k = 7; + ABSL_RAW_CHECK(VariableOffset(n, m, k) == Offset, "Invalid offset"); + for (auto _ : state) { + DoNotOptimize(n); + DoNotOptimize(m); + DoNotOptimize(k); + DoNotOptimize(VariableOffset(n, m, k)); + } +} + +template +void BM_OffsetVariable(benchmark::State& state) { + using L = Layout; + size_t n = 3; + size_t m = 5; + size_t k = 7; + ABSL_RAW_CHECK(L::Partial(n, m, k).template Offset<3>() == Offset, + "Inavlid offset"); + for (auto _ : state) { + DoNotOptimize(n); + DoNotOptimize(m); + DoNotOptimize(k); + DoNotOptimize(L::Partial(n, m, k).template Offset<3>()); + } +} + +// Run all benchmarks in two modes: +// +// Layout with padding: int8_t[3], int16_t[5], int32_t[7], Int128[?]. +// Layout without padding: Int128[3], int32_t[5], int16_t[7], int8_t[?]. + +#define OFFSET_BENCHMARK(NAME, OFFSET, T1, T2, T3, T4) \ + auto& NAME##_##OFFSET##_##T1##_##T2##_##T3##_##T4 = \ + NAME; \ + BENCHMARK(NAME##_##OFFSET##_##T1##_##T2##_##T3##_##T4) + +OFFSET_BENCHMARK(BM_OffsetConstantHeadroom, 48, int8_t, int16_t, int32_t, + Int128); +OFFSET_BENCHMARK(BM_OffsetConstant, 48, int8_t, int16_t, int32_t, Int128); +OFFSET_BENCHMARK(BM_OffsetConstantHeadroom, 82, Int128, int32_t, int16_t, + int8_t); +OFFSET_BENCHMARK(BM_OffsetConstant, 82, Int128, int32_t, int16_t, int8_t); +OFFSET_BENCHMARK(BM_OffsetVariableHeadroom, 48, int8_t, int16_t, int32_t, + Int128); +OFFSET_BENCHMARK(BM_OffsetVariable, 48, int8_t, int16_t, int32_t, Int128); +OFFSET_BENCHMARK(BM_OffsetVariableHeadroom, 82, Int128, int32_t, int16_t, + int8_t); +OFFSET_BENCHMARK(BM_OffsetVariable, 82, Int128, int32_t, int16_t, int8_t); +} // namespace +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/TMessagesProj/jni/voip/webrtc/absl/container/internal/layout_test.cc b/TMessagesProj/jni/voip/webrtc/absl/container/internal/layout_test.cc new file mode 100644 index 000000000..54e5d5bbb --- /dev/null +++ b/TMessagesProj/jni/voip/webrtc/absl/container/internal/layout_test.cc @@ -0,0 +1,1641 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/container/internal/layout.h" + +// We need ::max_align_t because some libstdc++ versions don't provide +// std::max_align_t +#include + +#include +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/base/config.h" +#include "absl/base/internal/raw_logging.h" +#include "absl/types/span.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +namespace { + +using ::absl::Span; +using ::testing::ElementsAre; + +size_t Distance(const void* from, const void* to) { + ABSL_RAW_CHECK(from <= to, "Distance must be non-negative"); + return static_cast(to) - static_cast(from); +} + +template +Expected Type(Actual val) { + static_assert(std::is_same(), ""); + return val; +} + +// Helper classes to test different size and alignments. 
+struct alignas(8) Int128 { + uint64_t a, b; + friend bool operator==(Int128 lhs, Int128 rhs) { + return std::tie(lhs.a, lhs.b) == std::tie(rhs.a, rhs.b); + } + + static std::string Name() { + return internal_layout::adl_barrier::TypeName(); + } +}; + +// int64_t is *not* 8-byte aligned on all platforms! +struct alignas(8) Int64 { + int64_t a; + friend bool operator==(Int64 lhs, Int64 rhs) { + return lhs.a == rhs.a; + } +}; + +// Properties of types that this test relies on. +static_assert(sizeof(int8_t) == 1, ""); +static_assert(alignof(int8_t) == 1, ""); +static_assert(sizeof(int16_t) == 2, ""); +static_assert(alignof(int16_t) == 2, ""); +static_assert(sizeof(int32_t) == 4, ""); +static_assert(alignof(int32_t) == 4, ""); +static_assert(sizeof(Int64) == 8, ""); +static_assert(alignof(Int64) == 8, ""); +static_assert(sizeof(Int128) == 16, ""); +static_assert(alignof(Int128) == 8, ""); + +template +void SameType() { + static_assert(std::is_same(), ""); +} + +TEST(Layout, ElementType) { + { + using L = Layout; + SameType>(); + SameType>(); + SameType>(); + } + { + using L = Layout; + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + } + { + using L = Layout; + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + SameType>(); + } +} + +TEST(Layout, ElementTypes) { + { + using L = Layout; + SameType, L::ElementTypes>(); + SameType, decltype(L::Partial())::ElementTypes>(); + SameType, decltype(L::Partial(0))::ElementTypes>(); + } + { + using L = Layout; + SameType, L::ElementTypes>(); + SameType, + decltype(L::Partial())::ElementTypes>(); + SameType, + decltype(L::Partial(0))::ElementTypes>(); + } + { + using L = Layout; + SameType, L::ElementTypes>(); + SameType, + decltype(L::Partial())::ElementTypes>(); + SameType, + decltype(L::Partial(0))::ElementTypes>(); + SameType, + decltype(L::Partial(0, 0))::ElementTypes>(); + SameType, + decltype(L::Partial(0, 0, 0))::ElementTypes>(); + } +} + +TEST(Layout, OffsetByIndex) { + { + using L = Layout; + EXPECT_EQ(0, L::Partial().Offset<0>()); + EXPECT_EQ(0, L::Partial(3).Offset<0>()); + EXPECT_EQ(0, L(3).Offset<0>()); + } + { + using L = Layout; + EXPECT_EQ(0, L::Partial().Offset<0>()); + EXPECT_EQ(0, L::Partial(3).Offset<0>()); + EXPECT_EQ(12, L::Partial(3).Offset<1>()); + EXPECT_EQ(0, L::Partial(3, 5).Offset<0>()); + EXPECT_EQ(12, L::Partial(3, 5).Offset<1>()); + EXPECT_EQ(0, L(3, 5).Offset<0>()); + EXPECT_EQ(12, L(3, 5).Offset<1>()); + } + { + using L = Layout; + EXPECT_EQ(0, L::Partial().Offset<0>()); + EXPECT_EQ(0, L::Partial(0).Offset<0>()); + EXPECT_EQ(0, L::Partial(0).Offset<1>()); + EXPECT_EQ(0, L::Partial(1).Offset<0>()); + EXPECT_EQ(4, L::Partial(1).Offset<1>()); + EXPECT_EQ(0, L::Partial(5).Offset<0>()); + EXPECT_EQ(8, L::Partial(5).Offset<1>()); + EXPECT_EQ(0, L::Partial(0, 0).Offset<0>()); + EXPECT_EQ(0, L::Partial(0, 0).Offset<1>()); + EXPECT_EQ(0, L::Partial(0, 0).Offset<2>()); + EXPECT_EQ(0, L::Partial(1, 0).Offset<0>()); + EXPECT_EQ(4, L::Partial(1, 0).Offset<1>()); + EXPECT_EQ(8, L::Partial(1, 0).Offset<2>()); + EXPECT_EQ(0, L::Partial(5, 3).Offset<0>()); + EXPECT_EQ(8, L::Partial(5, 3).Offset<1>()); + EXPECT_EQ(24, L::Partial(5, 3).Offset<2>()); + EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<0>()); + EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<1>()); + EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<2>()); + EXPECT_EQ(0, L::Partial(1, 0, 0).Offset<0>()); + EXPECT_EQ(4, L::Partial(1, 0, 
0).Offset<1>()); + EXPECT_EQ(8, L::Partial(1, 0, 0).Offset<2>()); + EXPECT_EQ(0, L::Partial(5, 3, 1).Offset<0>()); + EXPECT_EQ(24, L::Partial(5, 3, 1).Offset<2>()); + EXPECT_EQ(8, L::Partial(5, 3, 1).Offset<1>()); + EXPECT_EQ(0, L(5, 3, 1).Offset<0>()); + EXPECT_EQ(24, L(5, 3, 1).Offset<2>()); + EXPECT_EQ(8, L(5, 3, 1).Offset<1>()); + } +} + +TEST(Layout, OffsetByType) { + { + using L = Layout; + EXPECT_EQ(0, L::Partial().Offset()); + EXPECT_EQ(0, L::Partial(3).Offset()); + EXPECT_EQ(0, L(3).Offset()); + } + { + using L = Layout; + EXPECT_EQ(0, L::Partial().Offset()); + EXPECT_EQ(0, L::Partial(0).Offset()); + EXPECT_EQ(0, L::Partial(0).Offset()); + EXPECT_EQ(0, L::Partial(1).Offset()); + EXPECT_EQ(4, L::Partial(1).Offset()); + EXPECT_EQ(0, L::Partial(5).Offset()); + EXPECT_EQ(8, L::Partial(5).Offset()); + EXPECT_EQ(0, L::Partial(0, 0).Offset()); + EXPECT_EQ(0, L::Partial(0, 0).Offset()); + EXPECT_EQ(0, L::Partial(0, 0).Offset()); + EXPECT_EQ(0, L::Partial(1, 0).Offset()); + EXPECT_EQ(4, L::Partial(1, 0).Offset()); + EXPECT_EQ(8, L::Partial(1, 0).Offset()); + EXPECT_EQ(0, L::Partial(5, 3).Offset()); + EXPECT_EQ(8, L::Partial(5, 3).Offset()); + EXPECT_EQ(24, L::Partial(5, 3).Offset()); + EXPECT_EQ(0, L::Partial(0, 0, 0).Offset()); + EXPECT_EQ(0, L::Partial(0, 0, 0).Offset()); + EXPECT_EQ(0, L::Partial(0, 0, 0).Offset()); + EXPECT_EQ(0, L::Partial(1, 0, 0).Offset()); + EXPECT_EQ(4, L::Partial(1, 0, 0).Offset()); + EXPECT_EQ(8, L::Partial(1, 0, 0).Offset()); + EXPECT_EQ(0, L::Partial(5, 3, 1).Offset()); + EXPECT_EQ(24, L::Partial(5, 3, 1).Offset()); + EXPECT_EQ(8, L::Partial(5, 3, 1).Offset()); + EXPECT_EQ(0, L(5, 3, 1).Offset()); + EXPECT_EQ(24, L(5, 3, 1).Offset()); + EXPECT_EQ(8, L(5, 3, 1).Offset()); + } +} + +TEST(Layout, Offsets) { + { + using L = Layout; + EXPECT_THAT(L::Partial().Offsets(), ElementsAre(0)); + EXPECT_THAT(L::Partial(3).Offsets(), ElementsAre(0)); + EXPECT_THAT(L(3).Offsets(), ElementsAre(0)); + } + { + using L = Layout; + EXPECT_THAT(L::Partial().Offsets(), ElementsAre(0)); + EXPECT_THAT(L::Partial(3).Offsets(), ElementsAre(0, 12)); + EXPECT_THAT(L::Partial(3, 5).Offsets(), ElementsAre(0, 12)); + EXPECT_THAT(L(3, 5).Offsets(), ElementsAre(0, 12)); + } + { + using L = Layout; + EXPECT_THAT(L::Partial().Offsets(), ElementsAre(0)); + EXPECT_THAT(L::Partial(1).Offsets(), ElementsAre(0, 4)); + EXPECT_THAT(L::Partial(5).Offsets(), ElementsAre(0, 8)); + EXPECT_THAT(L::Partial(0, 0).Offsets(), ElementsAre(0, 0, 0)); + EXPECT_THAT(L::Partial(1, 0).Offsets(), ElementsAre(0, 4, 8)); + EXPECT_THAT(L::Partial(5, 3).Offsets(), ElementsAre(0, 8, 24)); + EXPECT_THAT(L::Partial(0, 0, 0).Offsets(), ElementsAre(0, 0, 0)); + EXPECT_THAT(L::Partial(1, 0, 0).Offsets(), ElementsAre(0, 4, 8)); + EXPECT_THAT(L::Partial(5, 3, 1).Offsets(), ElementsAre(0, 8, 24)); + EXPECT_THAT(L(5, 3, 1).Offsets(), ElementsAre(0, 8, 24)); + } +} + +TEST(Layout, AllocSize) { + { + using L = Layout; + EXPECT_EQ(0, L::Partial(0).AllocSize()); + EXPECT_EQ(12, L::Partial(3).AllocSize()); + EXPECT_EQ(12, L(3).AllocSize()); + } + { + using L = Layout; + EXPECT_EQ(32, L::Partial(3, 5).AllocSize()); + EXPECT_EQ(32, L(3, 5).AllocSize()); + } + { + using L = Layout; + EXPECT_EQ(0, L::Partial(0, 0, 0).AllocSize()); + EXPECT_EQ(8, L::Partial(1, 0, 0).AllocSize()); + EXPECT_EQ(8, L::Partial(0, 1, 0).AllocSize()); + EXPECT_EQ(16, L::Partial(0, 0, 1).AllocSize()); + EXPECT_EQ(24, L::Partial(1, 1, 1).AllocSize()); + EXPECT_EQ(136, L::Partial(3, 5, 7).AllocSize()); + EXPECT_EQ(136, L(3, 5, 7).AllocSize()); + } +} + +TEST(Layout, 
SizeByIndex) { + { + using L = Layout; + EXPECT_EQ(0, L::Partial(0).Size<0>()); + EXPECT_EQ(3, L::Partial(3).Size<0>()); + EXPECT_EQ(3, L(3).Size<0>()); + } + { + using L = Layout; + EXPECT_EQ(0, L::Partial(0).Size<0>()); + EXPECT_EQ(3, L::Partial(3).Size<0>()); + EXPECT_EQ(3, L::Partial(3, 5).Size<0>()); + EXPECT_EQ(5, L::Partial(3, 5).Size<1>()); + EXPECT_EQ(3, L(3, 5).Size<0>()); + EXPECT_EQ(5, L(3, 5).Size<1>()); + } + { + using L = Layout; + EXPECT_EQ(3, L::Partial(3).Size<0>()); + EXPECT_EQ(3, L::Partial(3, 5).Size<0>()); + EXPECT_EQ(5, L::Partial(3, 5).Size<1>()); + EXPECT_EQ(3, L::Partial(3, 5, 7).Size<0>()); + EXPECT_EQ(5, L::Partial(3, 5, 7).Size<1>()); + EXPECT_EQ(7, L::Partial(3, 5, 7).Size<2>()); + EXPECT_EQ(3, L(3, 5, 7).Size<0>()); + EXPECT_EQ(5, L(3, 5, 7).Size<1>()); + EXPECT_EQ(7, L(3, 5, 7).Size<2>()); + } +} + +TEST(Layout, SizeByType) { + { + using L = Layout; + EXPECT_EQ(0, L::Partial(0).Size()); + EXPECT_EQ(3, L::Partial(3).Size()); + EXPECT_EQ(3, L(3).Size()); + } + { + using L = Layout; + EXPECT_EQ(3, L::Partial(3).Size()); + EXPECT_EQ(3, L::Partial(3, 5).Size()); + EXPECT_EQ(5, L::Partial(3, 5).Size()); + EXPECT_EQ(3, L::Partial(3, 5, 7).Size()); + EXPECT_EQ(5, L::Partial(3, 5, 7).Size()); + EXPECT_EQ(7, L::Partial(3, 5, 7).Size()); + EXPECT_EQ(3, L(3, 5, 7).Size()); + EXPECT_EQ(5, L(3, 5, 7).Size()); + EXPECT_EQ(7, L(3, 5, 7).Size()); + } +} + +TEST(Layout, Sizes) { + { + using L = Layout; + EXPECT_THAT(L::Partial().Sizes(), ElementsAre()); + EXPECT_THAT(L::Partial(3).Sizes(), ElementsAre(3)); + EXPECT_THAT(L(3).Sizes(), ElementsAre(3)); + } + { + using L = Layout; + EXPECT_THAT(L::Partial().Sizes(), ElementsAre()); + EXPECT_THAT(L::Partial(3).Sizes(), ElementsAre(3)); + EXPECT_THAT(L::Partial(3, 5).Sizes(), ElementsAre(3, 5)); + EXPECT_THAT(L(3, 5).Sizes(), ElementsAre(3, 5)); + } + { + using L = Layout; + EXPECT_THAT(L::Partial().Sizes(), ElementsAre()); + EXPECT_THAT(L::Partial(3).Sizes(), ElementsAre(3)); + EXPECT_THAT(L::Partial(3, 5).Sizes(), ElementsAre(3, 5)); + EXPECT_THAT(L::Partial(3, 5, 7).Sizes(), ElementsAre(3, 5, 7)); + EXPECT_THAT(L(3, 5, 7).Sizes(), ElementsAre(3, 5, 7)); + } +} + +TEST(Layout, PointerByIndex) { + alignas(max_align_t) const unsigned char p[100] = {}; + { + using L = Layout; + EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer<0>(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(3).Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L(3).Pointer<0>(p)))); + } + { + using L = Layout; + EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer<0>(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(3).Pointer<0>(p)))); + EXPECT_EQ(12, + Distance(p, Type(L::Partial(3).Pointer<1>(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(3, 5).Pointer<0>(p)))); + EXPECT_EQ( + 12, Distance(p, Type(L::Partial(3, 5).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L(3, 5).Pointer<0>(p)))); + EXPECT_EQ(12, Distance(p, Type(L(3, 5).Pointer<1>(p)))); + } + { + using L = Layout; + EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0).Pointer<0>(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(0).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(1).Pointer<0>(p)))); + EXPECT_EQ(4, + Distance(p, Type(L::Partial(1).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(5).Pointer<0>(p)))); + EXPECT_EQ(8, + Distance(p, Type(L::Partial(5).Pointer<1>(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(0, 0).Pointer<0>(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(0, 
0).Pointer<1>(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(0, 0).Pointer<2>(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(1, 0).Pointer<0>(p)))); + EXPECT_EQ( + 4, Distance(p, Type(L::Partial(1, 0).Pointer<1>(p)))); + EXPECT_EQ(8, + Distance(p, Type(L::Partial(1, 0).Pointer<2>(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(5, 3).Pointer<0>(p)))); + EXPECT_EQ( + 8, Distance(p, Type(L::Partial(5, 3).Pointer<1>(p)))); + EXPECT_EQ(24, + Distance(p, Type(L::Partial(5, 3).Pointer<2>(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(0, 0, 0).Pointer<0>(p)))); + EXPECT_EQ( + 0, + Distance(p, Type(L::Partial(0, 0, 0).Pointer<1>(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(0, 0, 0).Pointer<2>(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(1, 0, 0).Pointer<0>(p)))); + EXPECT_EQ( + 4, + Distance(p, Type(L::Partial(1, 0, 0).Pointer<1>(p)))); + EXPECT_EQ( + 8, Distance(p, Type(L::Partial(1, 0, 0).Pointer<2>(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(5, 3, 1).Pointer<0>(p)))); + EXPECT_EQ( + 24, + Distance(p, Type(L::Partial(5, 3, 1).Pointer<2>(p)))); + EXPECT_EQ( + 8, + Distance(p, Type(L::Partial(5, 3, 1).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L(5, 3, 1).Pointer<0>(p)))); + EXPECT_EQ(24, Distance(p, Type(L(5, 3, 1).Pointer<2>(p)))); + EXPECT_EQ(8, Distance(p, Type(L(5, 3, 1).Pointer<1>(p)))); + } +} + +TEST(Layout, PointerByType) { + alignas(max_align_t) const unsigned char p[100] = {}; + { + using L = Layout; + EXPECT_EQ( + 0, Distance(p, Type(L::Partial().Pointer(p)))); + EXPECT_EQ( + 0, + Distance(p, Type(L::Partial(3).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L(3).Pointer(p)))); + } + { + using L = Layout; + EXPECT_EQ( + 0, Distance(p, Type(L::Partial().Pointer(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(0).Pointer(p)))); + EXPECT_EQ( + 0, + Distance(p, Type(L::Partial(0).Pointer(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(1).Pointer(p)))); + EXPECT_EQ( + 4, + Distance(p, Type(L::Partial(1).Pointer(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(5).Pointer(p)))); + EXPECT_EQ( + 8, + Distance(p, Type(L::Partial(5).Pointer(p)))); + EXPECT_EQ( + 0, + Distance(p, Type(L::Partial(0, 0).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type( + L::Partial(0, 0).Pointer(p)))); + EXPECT_EQ( + 0, + Distance(p, Type(L::Partial(0, 0).Pointer(p)))); + EXPECT_EQ( + 0, + Distance(p, Type(L::Partial(1, 0).Pointer(p)))); + EXPECT_EQ(4, Distance(p, Type( + L::Partial(1, 0).Pointer(p)))); + EXPECT_EQ( + 8, + Distance(p, Type(L::Partial(1, 0).Pointer(p)))); + EXPECT_EQ( + 0, + Distance(p, Type(L::Partial(5, 3).Pointer(p)))); + EXPECT_EQ(8, Distance(p, Type( + L::Partial(5, 3).Pointer(p)))); + EXPECT_EQ( + 24, + Distance(p, Type(L::Partial(5, 3).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type( + L::Partial(0, 0, 0).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type( + L::Partial(0, 0, 0).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type( + L::Partial(0, 0, 0).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type( + L::Partial(1, 0, 0).Pointer(p)))); + EXPECT_EQ(4, Distance(p, Type( + L::Partial(1, 0, 0).Pointer(p)))); + EXPECT_EQ(8, Distance(p, Type( + L::Partial(1, 0, 0).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type( + L::Partial(5, 3, 1).Pointer(p)))); + EXPECT_EQ(24, Distance(p, Type( + L::Partial(5, 3, 1).Pointer(p)))); + EXPECT_EQ(8, Distance(p, Type( + L::Partial(5, 3, 1).Pointer(p)))); + EXPECT_EQ(24, + Distance(p, Type(L(5, 3, 1).Pointer(p)))); + EXPECT_EQ( + 8, Distance(p, Type(L(5, 3, 1).Pointer(p)))); + } +} + +TEST(Layout, MutablePointerByIndex) 
{ + alignas(max_align_t) unsigned char p[100]; + { + using L = Layout; + EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(3).Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L(3).Pointer<0>(p)))); + } + { + using L = Layout; + EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(3).Pointer<0>(p)))); + EXPECT_EQ(12, Distance(p, Type(L::Partial(3).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(3, 5).Pointer<0>(p)))); + EXPECT_EQ(12, Distance(p, Type(L::Partial(3, 5).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L(3, 5).Pointer<0>(p)))); + EXPECT_EQ(12, Distance(p, Type(L(3, 5).Pointer<1>(p)))); + } + { + using L = Layout; + EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0).Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(1).Pointer<0>(p)))); + EXPECT_EQ(4, Distance(p, Type(L::Partial(1).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(5).Pointer<0>(p)))); + EXPECT_EQ(8, Distance(p, Type(L::Partial(5).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0).Pointer<0>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0).Pointer<2>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(1, 0).Pointer<0>(p)))); + EXPECT_EQ(4, Distance(p, Type(L::Partial(1, 0).Pointer<1>(p)))); + EXPECT_EQ(8, Distance(p, Type(L::Partial(1, 0).Pointer<2>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(5, 3).Pointer<0>(p)))); + EXPECT_EQ(8, Distance(p, Type(L::Partial(5, 3).Pointer<1>(p)))); + EXPECT_EQ(24, Distance(p, Type(L::Partial(5, 3).Pointer<2>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0, 0).Pointer<0>(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(0, 0, 0).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0, 0).Pointer<2>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(1, 0, 0).Pointer<0>(p)))); + EXPECT_EQ(4, + Distance(p, Type(L::Partial(1, 0, 0).Pointer<1>(p)))); + EXPECT_EQ(8, Distance(p, Type(L::Partial(1, 0, 0).Pointer<2>(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(5, 3, 1).Pointer<0>(p)))); + EXPECT_EQ(24, + Distance(p, Type(L::Partial(5, 3, 1).Pointer<2>(p)))); + EXPECT_EQ(8, + Distance(p, Type(L::Partial(5, 3, 1).Pointer<1>(p)))); + EXPECT_EQ(0, Distance(p, Type(L(5, 3, 1).Pointer<0>(p)))); + EXPECT_EQ(24, Distance(p, Type(L(5, 3, 1).Pointer<2>(p)))); + EXPECT_EQ(8, Distance(p, Type(L(5, 3, 1).Pointer<1>(p)))); + } +} + +TEST(Layout, MutablePointerByType) { + alignas(max_align_t) unsigned char p[100]; + { + using L = Layout; + EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(3).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L(3).Pointer(p)))); + } + { + using L = Layout; + EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(0).Pointer(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(0).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(1).Pointer(p)))); + EXPECT_EQ(4, + Distance(p, Type(L::Partial(1).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L::Partial(5).Pointer(p)))); + EXPECT_EQ(8, + Distance(p, Type(L::Partial(5).Pointer(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(0, 0).Pointer(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(0, 0).Pointer(p)))); + EXPECT_EQ(0, + Distance(p, 
Type(L::Partial(0, 0).Pointer(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(1, 0).Pointer(p)))); + EXPECT_EQ( + 4, Distance(p, Type(L::Partial(1, 0).Pointer(p)))); + EXPECT_EQ(8, + Distance(p, Type(L::Partial(1, 0).Pointer(p)))); + EXPECT_EQ(0, + Distance(p, Type(L::Partial(5, 3).Pointer(p)))); + EXPECT_EQ( + 8, Distance(p, Type(L::Partial(5, 3).Pointer(p)))); + EXPECT_EQ(24, + Distance(p, Type(L::Partial(5, 3).Pointer(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(0, 0, 0).Pointer(p)))); + EXPECT_EQ( + 0, + Distance(p, Type(L::Partial(0, 0, 0).Pointer(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(0, 0, 0).Pointer(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(1, 0, 0).Pointer(p)))); + EXPECT_EQ( + 4, + Distance(p, Type(L::Partial(1, 0, 0).Pointer(p)))); + EXPECT_EQ( + 8, Distance(p, Type(L::Partial(1, 0, 0).Pointer(p)))); + EXPECT_EQ( + 0, Distance(p, Type(L::Partial(5, 3, 1).Pointer(p)))); + EXPECT_EQ( + 24, Distance(p, Type(L::Partial(5, 3, 1).Pointer(p)))); + EXPECT_EQ( + 8, + Distance(p, Type(L::Partial(5, 3, 1).Pointer(p)))); + EXPECT_EQ(0, Distance(p, Type(L(5, 3, 1).Pointer(p)))); + EXPECT_EQ(24, Distance(p, Type(L(5, 3, 1).Pointer(p)))); + EXPECT_EQ(8, Distance(p, Type(L(5, 3, 1).Pointer(p)))); + } +} + +TEST(Layout, Pointers) { + alignas(max_align_t) const unsigned char p[100] = {}; + using L = Layout; + { + const auto x = L::Partial(); + EXPECT_EQ(std::make_tuple(x.Pointer<0>(p)), + Type>(x.Pointers(p))); + } + { + const auto x = L::Partial(1); + EXPECT_EQ(std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p)), + (Type>(x.Pointers(p)))); + } + { + const auto x = L::Partial(1, 2); + EXPECT_EQ( + std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)), + (Type>( + x.Pointers(p)))); + } + { + const auto x = L::Partial(1, 2, 3); + EXPECT_EQ( + std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)), + (Type>( + x.Pointers(p)))); + } + { + const L x(1, 2, 3); + EXPECT_EQ( + std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)), + (Type>( + x.Pointers(p)))); + } +} + +TEST(Layout, MutablePointers) { + alignas(max_align_t) unsigned char p[100]; + using L = Layout; + { + const auto x = L::Partial(); + EXPECT_EQ(std::make_tuple(x.Pointer<0>(p)), + Type>(x.Pointers(p))); + } + { + const auto x = L::Partial(1); + EXPECT_EQ(std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p)), + (Type>(x.Pointers(p)))); + } + { + const auto x = L::Partial(1, 2); + EXPECT_EQ( + std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)), + (Type>(x.Pointers(p)))); + } + { + const auto x = L::Partial(1, 2, 3); + EXPECT_EQ( + std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)), + (Type>(x.Pointers(p)))); + } + { + const L x(1, 2, 3); + EXPECT_EQ( + std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)), + (Type>(x.Pointers(p)))); + } +} + +TEST(Layout, SliceByIndexSize) { + alignas(max_align_t) const unsigned char p[100] = {}; + { + using L = Layout; + EXPECT_EQ(0, L::Partial(0).Slice<0>(p).size()); + EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size()); + EXPECT_EQ(3, L(3).Slice<0>(p).size()); + } + { + using L = Layout; + EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size()); + EXPECT_EQ(5, L::Partial(3, 5).Slice<1>(p).size()); + EXPECT_EQ(5, L(3, 5).Slice<1>(p).size()); + } + { + using L = Layout; + EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size()); + EXPECT_EQ(3, L::Partial(3, 5).Slice<0>(p).size()); + EXPECT_EQ(5, L::Partial(3, 5).Slice<1>(p).size()); + EXPECT_EQ(3, L::Partial(3, 5, 7).Slice<0>(p).size()); + EXPECT_EQ(5, L::Partial(3, 
5, 7).Slice<1>(p).size()); + EXPECT_EQ(7, L::Partial(3, 5, 7).Slice<2>(p).size()); + EXPECT_EQ(3, L(3, 5, 7).Slice<0>(p).size()); + EXPECT_EQ(5, L(3, 5, 7).Slice<1>(p).size()); + EXPECT_EQ(7, L(3, 5, 7).Slice<2>(p).size()); + } +} + +TEST(Layout, SliceByTypeSize) { + alignas(max_align_t) const unsigned char p[100] = {}; + { + using L = Layout; + EXPECT_EQ(0, L::Partial(0).Slice(p).size()); + EXPECT_EQ(3, L::Partial(3).Slice(p).size()); + EXPECT_EQ(3, L(3).Slice(p).size()); + } + { + using L = Layout; + EXPECT_EQ(3, L::Partial(3).Slice(p).size()); + EXPECT_EQ(3, L::Partial(3, 5).Slice(p).size()); + EXPECT_EQ(5, L::Partial(3, 5).Slice(p).size()); + EXPECT_EQ(3, L::Partial(3, 5, 7).Slice(p).size()); + EXPECT_EQ(5, L::Partial(3, 5, 7).Slice(p).size()); + EXPECT_EQ(7, L::Partial(3, 5, 7).Slice(p).size()); + EXPECT_EQ(3, L(3, 5, 7).Slice(p).size()); + EXPECT_EQ(5, L(3, 5, 7).Slice(p).size()); + EXPECT_EQ(7, L(3, 5, 7).Slice(p).size()); + } +} + +TEST(Layout, MutableSliceByIndexSize) { + alignas(max_align_t) unsigned char p[100]; + { + using L = Layout; + EXPECT_EQ(0, L::Partial(0).Slice<0>(p).size()); + EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size()); + EXPECT_EQ(3, L(3).Slice<0>(p).size()); + } + { + using L = Layout; + EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size()); + EXPECT_EQ(5, L::Partial(3, 5).Slice<1>(p).size()); + EXPECT_EQ(5, L(3, 5).Slice<1>(p).size()); + } + { + using L = Layout; + EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size()); + EXPECT_EQ(3, L::Partial(3, 5).Slice<0>(p).size()); + EXPECT_EQ(5, L::Partial(3, 5).Slice<1>(p).size()); + EXPECT_EQ(3, L::Partial(3, 5, 7).Slice<0>(p).size()); + EXPECT_EQ(5, L::Partial(3, 5, 7).Slice<1>(p).size()); + EXPECT_EQ(7, L::Partial(3, 5, 7).Slice<2>(p).size()); + EXPECT_EQ(3, L(3, 5, 7).Slice<0>(p).size()); + EXPECT_EQ(5, L(3, 5, 7).Slice<1>(p).size()); + EXPECT_EQ(7, L(3, 5, 7).Slice<2>(p).size()); + } +} + +TEST(Layout, MutableSliceByTypeSize) { + alignas(max_align_t) unsigned char p[100]; + { + using L = Layout; + EXPECT_EQ(0, L::Partial(0).Slice(p).size()); + EXPECT_EQ(3, L::Partial(3).Slice(p).size()); + EXPECT_EQ(3, L(3).Slice(p).size()); + } + { + using L = Layout; + EXPECT_EQ(3, L::Partial(3).Slice(p).size()); + EXPECT_EQ(3, L::Partial(3, 5).Slice(p).size()); + EXPECT_EQ(5, L::Partial(3, 5).Slice(p).size()); + EXPECT_EQ(3, L::Partial(3, 5, 7).Slice(p).size()); + EXPECT_EQ(5, L::Partial(3, 5, 7).Slice(p).size()); + EXPECT_EQ(7, L::Partial(3, 5, 7).Slice(p).size()); + EXPECT_EQ(3, L(3, 5, 7).Slice(p).size()); + EXPECT_EQ(5, L(3, 5, 7).Slice(p).size()); + EXPECT_EQ(7, L(3, 5, 7).Slice(p).size()); + } +} + +TEST(Layout, SliceByIndexData) { + alignas(max_align_t) const unsigned char p[100] = {}; + { + using L = Layout; + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(0).Slice<0>(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(3).Slice<0>(p)).data())); + EXPECT_EQ(0, + Distance(p, Type>(L(3).Slice<0>(p)).data())); + } + { + using L = Layout; + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(3).Slice<0>(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, Type>(L::Partial(3, 5).Slice<0>(p)).data())); + EXPECT_EQ( + 12, + Distance( + p, Type>(L::Partial(3, 5).Slice<1>(p)).data())); + EXPECT_EQ( + 0, Distance(p, Type>(L(3, 5).Slice<0>(p)).data())); + EXPECT_EQ( + 12, Distance(p, Type>(L(3, 5).Slice<1>(p)).data())); + } + { + using L = Layout; + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(0).Slice<0>(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(1).Slice<0>(p)).data())); + EXPECT_EQ( + 0, Distance( + p, 
Type>(L::Partial(5).Slice<0>(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, Type>(L::Partial(0, 0).Slice<0>(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, Type>(L::Partial(0, 0).Slice<1>(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, Type>(L::Partial(1, 0).Slice<0>(p)).data())); + EXPECT_EQ( + 4, + Distance( + p, Type>(L::Partial(1, 0).Slice<1>(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, Type>(L::Partial(5, 3).Slice<0>(p)).data())); + EXPECT_EQ( + 8, + Distance( + p, Type>(L::Partial(5, 3).Slice<1>(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(0, 0, 0).Slice<0>(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(0, 0, 0).Slice<1>(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(0, 0, 0).Slice<2>(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(1, 0, 0).Slice<0>(p)).data())); + EXPECT_EQ( + 4, + Distance( + p, + Type>(L::Partial(1, 0, 0).Slice<1>(p)).data())); + EXPECT_EQ( + 8, + Distance( + p, + Type>(L::Partial(1, 0, 0).Slice<2>(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(5, 3, 1).Slice<0>(p)).data())); + EXPECT_EQ( + 24, + Distance( + p, + Type>(L::Partial(5, 3, 1).Slice<2>(p)).data())); + EXPECT_EQ( + 8, + Distance( + p, + Type>(L::Partial(5, 3, 1).Slice<1>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L(5, 3, 1).Slice<0>(p)).data())); + EXPECT_EQ( + 24, + Distance(p, Type>(L(5, 3, 1).Slice<2>(p)).data())); + EXPECT_EQ( + 8, + Distance(p, Type>(L(5, 3, 1).Slice<1>(p)).data())); + } +} + +TEST(Layout, SliceByTypeData) { + alignas(max_align_t) const unsigned char p[100] = {}; + { + using L = Layout; + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(3).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L(3).Slice(p)).data())); + } + { + using L = Layout; + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(1).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(5).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(0, 0).Slice(p)) + .data())); + EXPECT_EQ(0, Distance(p, Type>( + L::Partial(0, 0).Slice(p)) + .data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(1, 0).Slice(p)) + .data())); + EXPECT_EQ(4, Distance(p, Type>( + L::Partial(1, 0).Slice(p)) + .data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(5, 3).Slice(p)) + .data())); + EXPECT_EQ(8, Distance(p, Type>( + L::Partial(5, 3).Slice(p)) + .data())); + EXPECT_EQ(0, Distance(p, Type>( + L::Partial(0, 0, 0).Slice(p)) + .data())); + EXPECT_EQ(0, Distance(p, Type>( + L::Partial(0, 0, 0).Slice(p)) + .data())); + EXPECT_EQ(0, Distance(p, Type>( + L::Partial(0, 0, 0).Slice(p)) + .data())); + EXPECT_EQ(0, Distance(p, Type>( + L::Partial(1, 0, 0).Slice(p)) + .data())); + EXPECT_EQ(4, Distance(p, Type>( + L::Partial(1, 0, 0).Slice(p)) + .data())); + EXPECT_EQ(8, Distance(p, Type>( + L::Partial(1, 0, 0).Slice(p)) + .data())); + EXPECT_EQ(0, Distance(p, Type>( + L::Partial(5, 3, 1).Slice(p)) + .data())); + EXPECT_EQ(24, Distance(p, Type>( + L::Partial(5, 3, 1).Slice(p)) + .data())); + EXPECT_EQ(8, Distance(p, Type>( + L::Partial(5, 3, 1).Slice(p)) + .data())); + EXPECT_EQ( + 0, + Distance(p, + Type>(L(5, 3, 1).Slice(p)).data())); + EXPECT_EQ( + 24, + Distance(p, + Type>(L(5, 3, 1).Slice(p)).data())); + EXPECT_EQ( + 8, + Distance( + p, Type>(L(5, 3, 1).Slice(p)).data())); + } +} + +TEST(Layout, 
MutableSliceByIndexData) { + alignas(max_align_t) unsigned char p[100]; + { + using L = Layout; + EXPECT_EQ( + 0, Distance(p, Type>(L::Partial(0).Slice<0>(p)).data())); + EXPECT_EQ( + 0, Distance(p, Type>(L::Partial(3).Slice<0>(p)).data())); + EXPECT_EQ(0, Distance(p, Type>(L(3).Slice<0>(p)).data())); + } + { + using L = Layout; + EXPECT_EQ( + 0, Distance(p, Type>(L::Partial(3).Slice<0>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(3, 5).Slice<0>(p)).data())); + EXPECT_EQ( + 12, + Distance(p, Type>(L::Partial(3, 5).Slice<1>(p)).data())); + EXPECT_EQ(0, Distance(p, Type>(L(3, 5).Slice<0>(p)).data())); + EXPECT_EQ(12, Distance(p, Type>(L(3, 5).Slice<1>(p)).data())); + } + { + using L = Layout; + EXPECT_EQ( + 0, Distance(p, Type>(L::Partial(0).Slice<0>(p)).data())); + EXPECT_EQ( + 0, Distance(p, Type>(L::Partial(1).Slice<0>(p)).data())); + EXPECT_EQ( + 0, Distance(p, Type>(L::Partial(5).Slice<0>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(0, 0).Slice<0>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(0, 0).Slice<1>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(1, 0).Slice<0>(p)).data())); + EXPECT_EQ( + 4, + Distance(p, Type>(L::Partial(1, 0).Slice<1>(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(5, 3).Slice<0>(p)).data())); + EXPECT_EQ( + 8, + Distance(p, Type>(L::Partial(5, 3).Slice<1>(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(0, 0, 0).Slice<0>(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(0, 0, 0).Slice<1>(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(0, 0, 0).Slice<2>(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(1, 0, 0).Slice<0>(p)).data())); + EXPECT_EQ( + 4, Distance( + p, Type>(L::Partial(1, 0, 0).Slice<1>(p)).data())); + EXPECT_EQ( + 8, Distance( + p, Type>(L::Partial(1, 0, 0).Slice<2>(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(5, 3, 1).Slice<0>(p)).data())); + EXPECT_EQ( + 24, Distance( + p, Type>(L::Partial(5, 3, 1).Slice<2>(p)).data())); + EXPECT_EQ( + 8, Distance( + p, Type>(L::Partial(5, 3, 1).Slice<1>(p)).data())); + EXPECT_EQ(0, + Distance(p, Type>(L(5, 3, 1).Slice<0>(p)).data())); + EXPECT_EQ(24, + Distance(p, Type>(L(5, 3, 1).Slice<2>(p)).data())); + EXPECT_EQ(8, + Distance(p, Type>(L(5, 3, 1).Slice<1>(p)).data())); + } +} + +TEST(Layout, MutableSliceByTypeData) { + alignas(max_align_t) unsigned char p[100]; + { + using L = Layout; + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(0).Slice(p)).data())); + EXPECT_EQ( + 0, Distance( + p, Type>(L::Partial(3).Slice(p)).data())); + EXPECT_EQ(0, + Distance(p, Type>(L(3).Slice(p)).data())); + } + { + using L = Layout; + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(1).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance(p, Type>(L::Partial(5).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance(p, + Type>(L::Partial(0, 0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, Type>(L::Partial(0, 0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance(p, + Type>(L::Partial(1, 0).Slice(p)).data())); + EXPECT_EQ( + 4, + Distance( + p, Type>(L::Partial(1, 0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance(p, + Type>(L::Partial(5, 3).Slice(p)).data())); + EXPECT_EQ( + 8, + Distance( + p, Type>(L::Partial(5, 3).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(0, 0, 0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(0, 0, 0).Slice(p)).data())); + 
EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(0, 0, 0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(1, 0, 0).Slice(p)).data())); + EXPECT_EQ( + 4, + Distance( + p, + Type>(L::Partial(1, 0, 0).Slice(p)).data())); + EXPECT_EQ( + 8, + Distance( + p, + Type>(L::Partial(1, 0, 0).Slice(p)).data())); + EXPECT_EQ( + 0, + Distance( + p, + Type>(L::Partial(5, 3, 1).Slice(p)).data())); + EXPECT_EQ( + 24, + Distance( + p, + Type>(L::Partial(5, 3, 1).Slice(p)).data())); + EXPECT_EQ( + 8, + Distance( + p, + Type>(L::Partial(5, 3, 1).Slice(p)).data())); + EXPECT_EQ( + 0, Distance(p, Type>(L(5, 3, 1).Slice(p)).data())); + EXPECT_EQ( + 24, + Distance(p, Type>(L(5, 3, 1).Slice(p)).data())); + EXPECT_EQ( + 8, + Distance(p, Type>(L(5, 3, 1).Slice(p)).data())); + } +} + +MATCHER_P(IsSameSlice, slice, "") { + return arg.size() == slice.size() && arg.data() == slice.data(); +} + +template +class TupleMatcher { + public: + explicit TupleMatcher(M... matchers) : matchers_(std::move(matchers)...) {} + + template + bool MatchAndExplain(const Tuple& p, + testing::MatchResultListener* /* listener */) const { + static_assert(std::tuple_size::value == sizeof...(M), ""); + return MatchAndExplainImpl( + p, absl::make_index_sequence::value>{}); + } + + // For the matcher concept. Left empty as we don't really need the diagnostics + // right now. + void DescribeTo(::std::ostream* os) const {} + void DescribeNegationTo(::std::ostream* os) const {} + + private: + template + bool MatchAndExplainImpl(const Tuple& p, absl::index_sequence) const { + // Using std::min as a simple variadic "and". + return std::min( + {true, testing::SafeMatcherCast< + const typename std::tuple_element::type&>( + std::get(matchers_)) + .Matches(std::get(p))...}); + } + + std::tuple matchers_; +}; + +template +testing::PolymorphicMatcher> Tuple(M... 
matchers) { + return testing::MakePolymorphicMatcher( + TupleMatcher(std::move(matchers)...)); +} + +TEST(Layout, Slices) { + alignas(max_align_t) const unsigned char p[100] = {}; + using L = Layout; + { + const auto x = L::Partial(); + EXPECT_THAT(Type>(x.Slices(p)), Tuple()); + } + { + const auto x = L::Partial(1); + EXPECT_THAT(Type>>(x.Slices(p)), + Tuple(IsSameSlice(x.Slice<0>(p)))); + } + { + const auto x = L::Partial(1, 2); + EXPECT_THAT( + (Type, Span>>(x.Slices(p))), + Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)))); + } + { + const auto x = L::Partial(1, 2, 3); + EXPECT_THAT((Type, Span, + Span>>(x.Slices(p))), + Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)), + IsSameSlice(x.Slice<2>(p)))); + } + { + const L x(1, 2, 3); + EXPECT_THAT((Type, Span, + Span>>(x.Slices(p))), + Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)), + IsSameSlice(x.Slice<2>(p)))); + } +} + +TEST(Layout, MutableSlices) { + alignas(max_align_t) unsigned char p[100] = {}; + using L = Layout; + { + const auto x = L::Partial(); + EXPECT_THAT(Type>(x.Slices(p)), Tuple()); + } + { + const auto x = L::Partial(1); + EXPECT_THAT(Type>>(x.Slices(p)), + Tuple(IsSameSlice(x.Slice<0>(p)))); + } + { + const auto x = L::Partial(1, 2); + EXPECT_THAT((Type, Span>>(x.Slices(p))), + Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)))); + } + { + const auto x = L::Partial(1, 2, 3); + EXPECT_THAT((Type, Span, Span>>( + x.Slices(p))), + Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)), + IsSameSlice(x.Slice<2>(p)))); + } + { + const L x(1, 2, 3); + EXPECT_THAT((Type, Span, Span>>( + x.Slices(p))), + Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)), + IsSameSlice(x.Slice<2>(p)))); + } +} + +TEST(Layout, UnalignedTypes) { + constexpr Layout x(1, 2, 3); + alignas(max_align_t) unsigned char p[x.AllocSize() + 1]; + EXPECT_THAT(x.Pointers(p + 1), Tuple(p + 1, p + 2, p + 4)); +} + +TEST(Layout, CustomAlignment) { + constexpr Layout> x(1, 2); + alignas(max_align_t) unsigned char p[x.AllocSize()]; + EXPECT_EQ(10, x.AllocSize()); + EXPECT_THAT(x.Pointers(p), Tuple(p + 0, p + 8)); +} + +TEST(Layout, OverAligned) { + constexpr size_t M = alignof(max_align_t); + constexpr Layout> x(1, 3); +#ifdef __GNUC__ + // Using __attribute__ ((aligned ())) instead of alignas to bypass a gcc bug: + // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=89357 + __attribute__((aligned(2 * M))) unsigned char p[x.AllocSize()]; +#else + alignas(2 * M) unsigned char p[x.AllocSize()]; +#endif + EXPECT_EQ(2 * M + 3, x.AllocSize()); + EXPECT_THAT(x.Pointers(p), Tuple(p + 0, p + 2 * M)); +} + +TEST(Layout, Alignment) { + static_assert(Layout::Alignment() == 1, ""); + static_assert(Layout::Alignment() == 4, ""); + static_assert(Layout::Alignment() == 8, ""); + static_assert(Layout>::Alignment() == 64, ""); + static_assert(Layout::Alignment() == 8, ""); + static_assert(Layout::Alignment() == 8, ""); + static_assert(Layout::Alignment() == 8, ""); + static_assert(Layout::Alignment() == 8, ""); + static_assert(Layout::Alignment() == 8, ""); + static_assert(Layout::Alignment() == 8, ""); +} + +TEST(Layout, ConstexprPartial) { + constexpr size_t M = alignof(max_align_t); + constexpr Layout> x(1, 3); + static_assert(x.Partial(1).template Offset<1>() == 2 * M, ""); +} +// [from, to) +struct Region { + size_t from; + size_t to; +}; + +void ExpectRegionPoisoned(const unsigned char* p, size_t n, bool poisoned) { +#ifdef ABSL_HAVE_ADDRESS_SANITIZER + for (size_t i = 0; i != n; ++i) { + EXPECT_EQ(poisoned, 
__asan_address_is_poisoned(p + i)); + } +#endif +} + +template +void ExpectPoisoned(const unsigned char (&buf)[N], + std::initializer_list reg) { + size_t prev = 0; + for (const Region& r : reg) { + ExpectRegionPoisoned(buf + prev, r.from - prev, false); + ExpectRegionPoisoned(buf + r.from, r.to - r.from, true); + prev = r.to; + } + ExpectRegionPoisoned(buf + prev, N - prev, false); +} + +TEST(Layout, PoisonPadding) { + using L = Layout; + + constexpr size_t n = L::Partial(1, 2, 3, 4).AllocSize(); + { + constexpr auto x = L::Partial(); + alignas(max_align_t) const unsigned char c[n] = {}; + x.PoisonPadding(c); + EXPECT_EQ(x.Slices(c), x.Slices(c)); + ExpectPoisoned(c, {}); + } + { + constexpr auto x = L::Partial(1); + alignas(max_align_t) const unsigned char c[n] = {}; + x.PoisonPadding(c); + EXPECT_EQ(x.Slices(c), x.Slices(c)); + ExpectPoisoned(c, {{1, 8}}); + } + { + constexpr auto x = L::Partial(1, 2); + alignas(max_align_t) const unsigned char c[n] = {}; + x.PoisonPadding(c); + EXPECT_EQ(x.Slices(c), x.Slices(c)); + ExpectPoisoned(c, {{1, 8}}); + } + { + constexpr auto x = L::Partial(1, 2, 3); + alignas(max_align_t) const unsigned char c[n] = {}; + x.PoisonPadding(c); + EXPECT_EQ(x.Slices(c), x.Slices(c)); + ExpectPoisoned(c, {{1, 8}, {36, 40}}); + } + { + constexpr auto x = L::Partial(1, 2, 3, 4); + alignas(max_align_t) const unsigned char c[n] = {}; + x.PoisonPadding(c); + EXPECT_EQ(x.Slices(c), x.Slices(c)); + ExpectPoisoned(c, {{1, 8}, {36, 40}}); + } + { + constexpr L x(1, 2, 3, 4); + alignas(max_align_t) const unsigned char c[n] = {}; + x.PoisonPadding(c); + EXPECT_EQ(x.Slices(c), x.Slices(c)); + ExpectPoisoned(c, {{1, 8}, {36, 40}}); + } +} + +TEST(Layout, DebugString) { + { + constexpr auto x = Layout::Partial(); + EXPECT_EQ("@0(1)", x.DebugString()); + } + { + constexpr auto x = Layout::Partial(1); + EXPECT_EQ("@0(1)[1]; @4(4)", x.DebugString()); + } + { + constexpr auto x = Layout::Partial(1, 2); + EXPECT_EQ("@0(1)[1]; @4(4)[2]; @12(1)", + x.DebugString()); + } + { + constexpr auto x = + Layout::Partial(1, 2, 3); + EXPECT_EQ( + "@0(1)[1]; @4(4)[2]; @12(1)[3]; " + "@16" + + Int128::Name() + "(16)", + x.DebugString()); + } + { + constexpr auto x = + Layout::Partial(1, 2, 3, 4); + EXPECT_EQ( + "@0(1)[1]; @4(4)[2]; @12(1)[3]; " + "@16" + + Int128::Name() + "(16)[4]", + x.DebugString()); + } + { + constexpr Layout x(1, 2, 3, 4); + EXPECT_EQ( + "@0(1)[1]; @4(4)[2]; @12(1)[3]; " + "@16" + + Int128::Name() + "(16)[4]", + x.DebugString()); + } +} + +TEST(Layout, CharTypes) { + constexpr Layout x(1); + alignas(max_align_t) char c[x.AllocSize()] = {}; + alignas(max_align_t) unsigned char uc[x.AllocSize()] = {}; + alignas(max_align_t) signed char sc[x.AllocSize()] = {}; + alignas(max_align_t) const char cc[x.AllocSize()] = {}; + alignas(max_align_t) const unsigned char cuc[x.AllocSize()] = {}; + alignas(max_align_t) const signed char csc[x.AllocSize()] = {}; + + Type(x.Pointer<0>(c)); + Type(x.Pointer<0>(uc)); + Type(x.Pointer<0>(sc)); + Type(x.Pointer<0>(cc)); + Type(x.Pointer<0>(cuc)); + Type(x.Pointer<0>(csc)); + + Type(x.Pointer(c)); + Type(x.Pointer(uc)); + Type(x.Pointer(sc)); + Type(x.Pointer(cc)); + Type(x.Pointer(cuc)); + Type(x.Pointer(csc)); + + Type>(x.Pointers(c)); + Type>(x.Pointers(uc)); + Type>(x.Pointers(sc)); + Type>(x.Pointers(cc)); + Type>(x.Pointers(cuc)); + Type>(x.Pointers(csc)); + + Type>(x.Slice<0>(c)); + Type>(x.Slice<0>(uc)); + Type>(x.Slice<0>(sc)); + Type>(x.Slice<0>(cc)); + Type>(x.Slice<0>(cuc)); + Type>(x.Slice<0>(csc)); + + Type>>(x.Slices(c)); + 
Type>>(x.Slices(uc)); + Type>>(x.Slices(sc)); + Type>>(x.Slices(cc)); + Type>>(x.Slices(cuc)); + Type>>(x.Slices(csc)); +} + +TEST(Layout, ConstElementType) { + constexpr Layout x(1); + alignas(int32_t) char c[x.AllocSize()] = {}; + const char* cc = c; + const int32_t* p = reinterpret_cast(cc); + + EXPECT_EQ(alignof(int32_t), x.Alignment()); + + EXPECT_EQ(0, x.Offset<0>()); + EXPECT_EQ(0, x.Offset()); + + EXPECT_THAT(x.Offsets(), ElementsAre(0)); + + EXPECT_EQ(1, x.Size<0>()); + EXPECT_EQ(1, x.Size()); + + EXPECT_THAT(x.Sizes(), ElementsAre(1)); + + EXPECT_EQ(sizeof(int32_t), x.AllocSize()); + + EXPECT_EQ(p, Type(x.Pointer<0>(c))); + EXPECT_EQ(p, Type(x.Pointer<0>(cc))); + + EXPECT_EQ(p, Type(x.Pointer(c))); + EXPECT_EQ(p, Type(x.Pointer(cc))); + + EXPECT_THAT(Type>(x.Pointers(c)), Tuple(p)); + EXPECT_THAT(Type>(x.Pointers(cc)), Tuple(p)); + + EXPECT_THAT(Type>(x.Slice<0>(c)), + IsSameSlice(Span(p, 1))); + EXPECT_THAT(Type>(x.Slice<0>(cc)), + IsSameSlice(Span(p, 1))); + + EXPECT_THAT(Type>(x.Slice(c)), + IsSameSlice(Span(p, 1))); + EXPECT_THAT(Type>(x.Slice(cc)), + IsSameSlice(Span(p, 1))); + + EXPECT_THAT(Type>>(x.Slices(c)), + Tuple(IsSameSlice(Span(p, 1)))); + EXPECT_THAT(Type>>(x.Slices(cc)), + Tuple(IsSameSlice(Span(p, 1)))); +} + +namespace example { + +// Immutable move-only string with sizeof equal to sizeof(void*). The string +// size and the characters are kept in the same heap allocation. +class CompactString { + public: + CompactString(const char* s = "") { // NOLINT + const size_t size = strlen(s); + // size_t[1], followed by char[size + 1]. + // This statement doesn't allocate memory. + const L layout(1, size + 1); + // AllocSize() tells us how much memory we need to allocate for all our + // data. + p_.reset(new unsigned char[layout.AllocSize()]); + // If running under ASAN, mark the padding bytes, if any, to catch memory + // errors. + layout.PoisonPadding(p_.get()); + // Store the size in the allocation. + // Pointer() is a synonym for Pointer<0>(). + *layout.Pointer(p_.get()) = size; + // Store the characters in the allocation. + memcpy(layout.Pointer(p_.get()), s, size + 1); + } + + size_t size() const { + // Equivalent to reinterpret_cast(*p). + return *L::Partial().Pointer(p_.get()); + } + + const char* c_str() const { + // Equivalent to reinterpret_cast(p.get() + sizeof(size_t)). + // The argument in Partial(1) specifies that we have size_t[1] in front of + // the characters. + return L::Partial(1).Pointer(p_.get()); + } + + private: + // Our heap allocation contains a size_t followed by an array of chars. + using L = Layout; + std::unique_ptr p_; +}; + +TEST(CompactString, Works) { + CompactString s = "hello"; + EXPECT_EQ(5, s.size()); + EXPECT_STREQ("hello", s.c_str()); +} + +} // namespace example + +} // namespace +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/TMessagesProj/jni/voip/webrtc/absl/container/internal/node_hash_policy_test.cc b/TMessagesProj/jni/voip/webrtc/absl/container/internal/node_hash_policy_test.cc new file mode 100644 index 000000000..84aabba96 --- /dev/null +++ b/TMessagesProj/jni/voip/webrtc/absl/container/internal/node_hash_policy_test.cc @@ -0,0 +1,69 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/container/internal/node_hash_policy.h" + +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/container/internal/hash_policy_traits.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +namespace { + +using ::testing::Pointee; + +struct Policy : node_hash_policy { + using key_type = int; + using init_type = int; + + template + static int* new_element(Alloc* alloc, int value) { + return new int(value); + } + + template + static void delete_element(Alloc* alloc, int* elem) { + delete elem; + } +}; + +using NodePolicy = hash_policy_traits; + +struct NodeTest : ::testing::Test { + std::allocator alloc; + int n = 53; + int* a = &n; +}; + +TEST_F(NodeTest, ConstructDestroy) { + NodePolicy::construct(&alloc, &a, 42); + EXPECT_THAT(a, Pointee(42)); + NodePolicy::destroy(&alloc, &a); +} + +TEST_F(NodeTest, transfer) { + int s = 42; + int* b = &s; + NodePolicy::transfer(&alloc, &a, &b); + EXPECT_EQ(&s, a); +} + +} // namespace +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/TMessagesProj/jni/voip/webrtc/absl/container/internal/raw_hash_map.h b/TMessagesProj/jni/voip/webrtc/absl/container/internal/raw_hash_map.h index 0a02757dd..c7df2efc6 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/container/internal/raw_hash_map.h +++ b/TMessagesProj/jni/voip/webrtc/absl/container/internal/raw_hash_map.h @@ -51,8 +51,9 @@ class raw_hash_map : public raw_hash_set { using key_arg = typename KeyArgImpl::template type; static_assert(!std::is_reference::value, ""); - // TODO(alkis): remove this assertion and verify that reference mapped_type is - // supported. + + // TODO(b/187807849): Evaluate whether to support reference mapped_type and + // remove this assertion if/when it is supported. static_assert(!std::is_reference::value, ""); using iterator = typename raw_hash_map::raw_hash_set::iterator; diff --git a/TMessagesProj/jni/voip/webrtc/absl/container/internal/raw_hash_set.cc b/TMessagesProj/jni/voip/webrtc/absl/container/internal/raw_hash_set.cc index 919ac0740..687bcb8a4 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/container/internal/raw_hash_set.cc +++ b/TMessagesProj/jni/voip/webrtc/absl/container/internal/raw_hash_set.cc @@ -23,11 +23,17 @@ namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { +alignas(16) ABSL_CONST_INIT ABSL_DLL const ctrl_t kEmptyGroup[16] = { + ctrl_t::kSentinel, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, + ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, + ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, + ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty}; + constexpr size_t Group::kWidth; // Returns "random" seed. 
inline size_t RandomSeed() { -#if ABSL_HAVE_THREAD_LOCAL +#ifdef ABSL_HAVE_THREAD_LOCAL static thread_local size_t counter = 0; size_t value = ++counter; #else // ABSL_HAVE_THREAD_LOCAL @@ -37,12 +43,25 @@ inline size_t RandomSeed() { return value ^ static_cast<size_t>(reinterpret_cast<uintptr_t>(&counter)); } -bool ShouldInsertBackwards(size_t hash, ctrl_t* ctrl) { +bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl) { // To avoid problems with weak hashes and single bit tests, we use % 13. // TODO(kfm,sbenza): revisit after we do unconditional mixing return (H1(hash, ctrl) ^ RandomSeed()) % 13 > 6; } +void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity) { + assert(ctrl[capacity] == ctrl_t::kSentinel); + assert(IsValidCapacity(capacity)); + for (ctrl_t* pos = ctrl; pos < ctrl + capacity; pos += Group::kWidth) { + Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos); + } + // Copy the cloned ctrl bytes. + std::memcpy(ctrl + capacity + 1, ctrl, NumClonedBytes()); + ctrl[capacity] = ctrl_t::kSentinel; +} +// Extern template instantiation for inline function. +template FindInfo find_first_non_full(const ctrl_t*, size_t, size_t); + } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl diff --git a/TMessagesProj/jni/voip/webrtc/absl/container/internal/raw_hash_set.h b/TMessagesProj/jni/voip/webrtc/absl/container/internal/raw_hash_set.h index e47e1fedf..7409d5eca 100644 --- a/TMessagesProj/jni/voip/webrtc/absl/container/internal/raw_hash_set.h +++ b/TMessagesProj/jni/voip/webrtc/absl/container/internal/raw_hash_set.h @@ -87,6 +87,17 @@ // // This probing function guarantees that after N probes, all the groups of the // table will be probed exactly once. +// +// The control state and slot array are stored contiguously in a shared heap +// allocation. The layout of this allocation is: `capacity()` control bytes, +// one sentinel control byte, `Group::kWidth - 1` cloned control bytes, +// <possible padding>, `capacity()` slots. The sentinel control byte is used in +// iteration so we know when we reach the end of the table. The cloned control +// bytes at the end of the table are cloned from the beginning of the table so +// groups that begin near the end of the table can see a full group. In cases in +// which there are more than `capacity()` cloned control bytes, the extra bytes +// are `kEmpty`, and these ensure that we always see at least one empty slot and +// can stop an unsuccessful search.
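To make the layout comment above concrete, here is a minimal sketch (an editor's illustration, not the library's code) of the allocation arithmetic it describes. The names kGroupWidth, NumControlBytesSketch, SlotOffsetSketch, and AllocSizeSketch are hypothetical; a 16-wide SSE2 group and a power-of-two slot alignment are assumed. The SlotOffset() and AllocSize() helpers added further down in this header compute the same quantities.

#include <cstddef>

constexpr size_t kGroupWidth = 16;  // assumed: SSE2 group width

// `capacity` control bytes + 1 sentinel + (kGroupWidth - 1) cloned bytes.
constexpr size_t NumControlBytesSketch(size_t capacity) {
  return capacity + 1 + (kGroupWidth - 1);
}

// The slots start where the control bytes end, rounded up to the slot
// alignment (this rounding is the possible padding mentioned above).
constexpr size_t SlotOffsetSketch(size_t capacity, size_t slot_align) {
  return (NumControlBytesSketch(capacity) + slot_align - 1) &
         ~(slot_align - 1);
}

// Total size of the shared allocation: control bytes, padding, then slots.
constexpr size_t AllocSizeSketch(size_t capacity, size_t slot_size,
                                 size_t slot_align) {
  return SlotOffsetSketch(capacity, slot_align) + capacity * slot_size;
}

// Example: capacity 15 with 8-byte, 8-aligned slots needs 15 + 1 + 15 = 31
// control bytes, padded to 32, plus 15 * 8 = 120 bytes of slots = 152 bytes.
static_assert(AllocSizeSketch(15, 8, 8) == 152, "layout arithmetic sketch");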
#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_ #define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_ @@ -102,9 +113,8 @@ #include #include -#include "absl/base/internal/bits.h" #include "absl/base/internal/endian.h" -#include "absl/base/macros.h" +#include "absl/base/optimization.h" #include "absl/base/port.h" #include "absl/container/internal/common.h" #include "absl/container/internal/compressed_tuple.h" @@ -113,15 +123,25 @@ #include "absl/container/internal/hashtable_debug_hooks.h" #include "absl/container/internal/hashtablez_sampler.h" #include "absl/container/internal/have_sse.h" -#include "absl/container/internal/layout.h" #include "absl/memory/memory.h" #include "absl/meta/type_traits.h" +#include "absl/numeric/bits.h" #include "absl/utility/utility.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { +template +void SwapAlloc(AllocType& lhs, AllocType& rhs, + std::true_type /* propagate_on_container_swap */) { + using std::swap; + swap(lhs, rhs); +} +template +void SwapAlloc(AllocType& /*lhs*/, AllocType& /*rhs*/, + std::false_type /* propagate_on_container_swap */) {} + template class probe_seq { public: @@ -169,24 +189,19 @@ struct IsDecomposable< // TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it. template -constexpr bool IsNoThrowSwappable() { +constexpr bool IsNoThrowSwappable(std::true_type = {} /* is_swappable */) { using std::swap; return noexcept(swap(std::declval(), std::declval())); } - -template -int TrailingZeros(T x) { - return sizeof(T) == 8 ? base_internal::CountTrailingZerosNonZero64( - static_cast(x)) - : base_internal::CountTrailingZerosNonZero32( - static_cast(x)); +template +constexpr bool IsNoThrowSwappable(std::false_type /* is_swappable */) { + return false; } template -int LeadingZeros(T x) { - return sizeof(T) == 8 - ? base_internal::CountLeadingZeros64(static_cast(x)) - : base_internal::CountLeadingZeros32(static_cast(x)); +uint32_t TrailingZeros(T x) { + ABSL_INTERNAL_ASSUME(x != 0); + return static_cast(countr_zero(x)); } // An abstraction over a bitmask. It provides an easy way to iterate through the @@ -215,27 +230,25 @@ class BitMask { return *this; } explicit operator bool() const { return mask_ != 0; } - int operator*() const { return LowestBitSet(); } - int LowestBitSet() const { + uint32_t operator*() const { return LowestBitSet(); } + uint32_t LowestBitSet() const { return container_internal::TrailingZeros(mask_) >> Shift; } - int HighestBitSet() const { - return (sizeof(T) * CHAR_BIT - container_internal::LeadingZeros(mask_) - - 1) >> - Shift; + uint32_t HighestBitSet() const { + return static_cast((bit_width(mask_) - 1) >> Shift); } BitMask begin() const { return *this; } BitMask end() const { return BitMask(0); } - int TrailingZeros() const { + uint32_t TrailingZeros() const { return container_internal::TrailingZeros(mask_) >> Shift; } - int LeadingZeros() const { + uint32_t LeadingZeros() const { constexpr int total_significant_bits = SignificantBits << Shift; constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits; - return container_internal::LeadingZeros(mask_ << extra_bits) >> Shift; + return static_cast(countl_zero(mask_ << extra_bits)) >> Shift; } private: @@ -249,48 +262,53 @@ class BitMask { T mask_; }; -using ctrl_t = signed char; using h2_t = uint8_t; // The values here are selected for maximum performance. See the static asserts -// below for details. -enum Ctrl : ctrl_t { +// below for details. 
We use an enum class so that when strict aliasing is +// enabled, the compiler knows ctrl_t doesn't alias other types. +enum class ctrl_t : int8_t { kEmpty = -128, // 0b10000000 kDeleted = -2, // 0b11111110 kSentinel = -1, // 0b11111111 }; static_assert( - kEmpty & kDeleted & kSentinel & 0x80, + (static_cast(ctrl_t::kEmpty) & + static_cast(ctrl_t::kDeleted) & + static_cast(ctrl_t::kSentinel) & 0x80) != 0, "Special markers need to have the MSB to make checking for them efficient"); -static_assert(kEmpty < kSentinel && kDeleted < kSentinel, - "kEmpty and kDeleted must be smaller than kSentinel to make the " - "SIMD test of IsEmptyOrDeleted() efficient"); -static_assert(kSentinel == -1, - "kSentinel must be -1 to elide loading it from memory into SIMD " - "registers (pcmpeqd xmm, xmm)"); -static_assert(kEmpty == -128, - "kEmpty must be -128 to make the SIMD check for its " +static_assert( + ctrl_t::kEmpty < ctrl_t::kSentinel && ctrl_t::kDeleted < ctrl_t::kSentinel, + "ctrl_t::kEmpty and ctrl_t::kDeleted must be smaller than " + "ctrl_t::kSentinel to make the SIMD test of IsEmptyOrDeleted() efficient"); +static_assert( + ctrl_t::kSentinel == static_cast(-1), + "ctrl_t::kSentinel must be -1 to elide loading it from memory into SIMD " + "registers (pcmpeqd xmm, xmm)"); +static_assert(ctrl_t::kEmpty == static_cast(-128), + "ctrl_t::kEmpty must be -128 to make the SIMD check for its " "existence efficient (psignb xmm, xmm)"); -static_assert(~kEmpty & ~kDeleted & kSentinel & 0x7F, - "kEmpty and kDeleted must share an unset bit that is not shared " - "by kSentinel to make the scalar test for MatchEmptyOrDeleted() " - "efficient"); -static_assert(kDeleted == -2, - "kDeleted must be -2 to make the implementation of " +static_assert( + (~static_cast(ctrl_t::kEmpty) & + ~static_cast(ctrl_t::kDeleted) & + static_cast(ctrl_t::kSentinel) & 0x7F) != 0, + "ctrl_t::kEmpty and ctrl_t::kDeleted must share an unset bit that is not " + "shared by ctrl_t::kSentinel to make the scalar test for " + "MatchEmptyOrDeleted() efficient"); +static_assert(ctrl_t::kDeleted == static_cast(-2), + "ctrl_t::kDeleted must be -2 to make the implementation of " "ConvertSpecialToEmptyAndFullToDeleted efficient"); // A single block of empty control bytes for tables without any slots allocated. // This enables removing a branch in the hot path of find(). +ABSL_DLL extern const ctrl_t kEmptyGroup[16]; inline ctrl_t* EmptyGroup() { - alignas(16) static constexpr ctrl_t empty_group[] = { - kSentinel, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, - kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty}; - return const_cast(empty_group); + return const_cast(kEmptyGroup); } // Mixes a randomly generated per-process seed with `hash` and `ctrl` to // randomize insertion order within groups. -bool ShouldInsertBackwards(size_t hash, ctrl_t* ctrl); +bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl); // Returns a hash seed. 
// @@ -306,12 +324,12 @@ inline size_t HashSeed(const ctrl_t* ctrl) { inline size_t H1(size_t hash, const ctrl_t* ctrl) { return (hash >> 7) ^ HashSeed(ctrl); } -inline ctrl_t H2(size_t hash) { return hash & 0x7F; } +inline h2_t H2(size_t hash) { return hash & 0x7F; } -inline bool IsEmpty(ctrl_t c) { return c == kEmpty; } -inline bool IsFull(ctrl_t c) { return c >= 0; } -inline bool IsDeleted(ctrl_t c) { return c == kDeleted; } -inline bool IsEmptyOrDeleted(ctrl_t c) { return c < kSentinel; } +inline bool IsEmpty(ctrl_t c) { return c == ctrl_t::kEmpty; } +inline bool IsFull(ctrl_t c) { return c >= static_cast(0); } +inline bool IsDeleted(ctrl_t c) { return c == ctrl_t::kDeleted; } +inline bool IsEmptyOrDeleted(ctrl_t c) { return c < ctrl_t::kSentinel; } #if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 @@ -342,32 +360,33 @@ struct GroupSse2Impl { BitMask Match(h2_t hash) const { auto match = _mm_set1_epi8(hash); return BitMask( - _mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))); + static_cast(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl)))); } // Returns a bitmask representing the positions of empty slots. BitMask MatchEmpty() const { #if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 - // This only works because kEmpty is -128. + // This only works because ctrl_t::kEmpty is -128. return BitMask( - _mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl))); + static_cast(_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl)))); #else - return Match(static_cast(kEmpty)); + return Match(static_cast(ctrl_t::kEmpty)); #endif } // Returns a bitmask representing the positions of empty or deleted slots. BitMask MatchEmptyOrDeleted() const { - auto special = _mm_set1_epi8(kSentinel); + auto special = _mm_set1_epi8(static_cast(ctrl_t::kSentinel)); return BitMask( - _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl))); + static_cast( + _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)))); } // Returns the number of trailing empty or deleted elements in the group. uint32_t CountLeadingEmptyOrDeleted() const { - auto special = _mm_set1_epi8(kSentinel); - return TrailingZeros( - _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1); + auto special = _mm_set1_epi8(static_cast(ctrl_t::kSentinel)); + return TrailingZeros(static_cast( + _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1)); } void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const { @@ -400,7 +419,7 @@ struct GroupPortableImpl { // // Caveat: there are false positives but: // - they only occur if there is a real match - // - they never occur on kEmpty, kDeleted, kSentinel + // - they never occur on ctrl_t::kEmpty, ctrl_t::kDeleted, ctrl_t::kSentinel // - they will be handled gracefully by subsequent checks in code // // Example: @@ -445,6 +464,10 @@ using Group = GroupSse2Impl; using Group = GroupPortableImpl; #endif +// The number of cloned control bytes that we copy from the beginning to the +// end of the control bytes array. 
+constexpr size_t NumClonedBytes() { return Group::kWidth - 1; } + template class raw_hash_set; @@ -452,31 +475,29 @@ inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; } // PRECONDITION: // IsValidCapacity(capacity) -// ctrl[capacity] == kSentinel -// ctrl[i] != kSentinel for all i < capacity +// ctrl[capacity] == ctrl_t::kSentinel +// ctrl[i] != ctrl_t::kSentinel for all i < capacity // Applies mapping for every byte in ctrl: // DELETED -> EMPTY // EMPTY -> EMPTY // FULL -> DELETED -inline void ConvertDeletedToEmptyAndFullToDeleted( - ctrl_t* ctrl, size_t capacity) { - assert(ctrl[capacity] == kSentinel); - assert(IsValidCapacity(capacity)); - for (ctrl_t* pos = ctrl; pos != ctrl + capacity + 1; pos += Group::kWidth) { - Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos); - } - // Copy the cloned ctrl bytes. - std::memcpy(ctrl + capacity + 1, ctrl, Group::kWidth); - ctrl[capacity] = kSentinel; -} +void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity); // Rounds up the capacity to the next power of 2 minus 1, with a minimum of 1. inline size_t NormalizeCapacity(size_t n) { - return n ? ~size_t{} >> LeadingZeros(n) : 1; + return n ? ~size_t{} >> countl_zero(n) : 1; } -// We use 7/8th as maximum load factor. -// For 16-wide groups, that gives an average of two empty slots per group. +// General notes on capacity/growth methods below: +// - We use 7/8th as maximum load factor. For 16-wide groups, that gives an +// average of two empty slots per group. +// - For (capacity+1) >= Group::kWidth, growth is 7/8*capacity. +// - For (capacity+1) < Group::kWidth, growth == capacity. In this case, we +// never need to probe (the whole table fits in one group) so we don't need a +// load factor less than 1. + +// Given `capacity` of the table, returns the size (i.e. number of full slots) +// at which we should grow the capacity. inline size_t CapacityToGrowth(size_t capacity) { assert(IsValidCapacity(capacity)); // `capacity*7/8` @@ -487,7 +508,7 @@ inline size_t CapacityToGrowth(size_t capacity) { return capacity - capacity / 8; } // From desired "growth" to a lowerbound of the necessary capacity. -// Might not be a valid one and required NormalizeCapacity(). +// Might not be a valid one and requires NormalizeCapacity(). inline size_t GrowthToLowerboundCapacity(size_t growth) { // `growth*8/7` if (Group::kWidth == 8 && growth == 7) { @@ -497,6 +518,148 @@ inline size_t GrowthToLowerboundCapacity(size_t growth) { return growth + static_cast((static_cast(growth) - 1) / 7); } +template +size_t SelectBucketCountForIterRange(InputIter first, InputIter last, + size_t bucket_count) { + if (bucket_count != 0) { + return bucket_count; + } + using InputIterCategory = + typename std::iterator_traits::iterator_category; + if (std::is_base_of::value) { + return GrowthToLowerboundCapacity( + static_cast(std::distance(first, last))); + } + return 0; +} + +inline void AssertIsFull(ctrl_t* ctrl) { + ABSL_HARDENING_ASSERT( + (ctrl != nullptr && IsFull(*ctrl)) && + "Invalid operation on iterator. The element might have " + "been erased, the table might have rehashed, or this may " + "be an end() iterator."); +} + +inline void AssertIsValid(ctrl_t* ctrl) { + ABSL_HARDENING_ASSERT( + (ctrl == nullptr || IsFull(*ctrl)) && + "Invalid operation on iterator. 
The element might have " + "been erased, the table might have rehashed, or this may " + "be an end() iterator."); +} + +struct FindInfo { + size_t offset; + size_t probe_length; +}; + +// The representation of the object has two modes: +// - small: For capacities < kWidth-1 +// - large: For the rest. +// +// Differences: +// - In small mode we are able to use the whole capacity. The extra control +// bytes give us at least one "empty" control byte to stop the iteration. +// This is important to make 1 a valid capacity. +// +// - In small mode only the first `capacity()` control bytes after the +// sentinel are valid. The rest contain dummy ctrl_t::kEmpty values that do not +// represent a real slot. This is important to take into account on +// find_first_non_full(), where we never try ShouldInsertBackwards() for +// small tables. +inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; } + +inline probe_seq probe(const ctrl_t* ctrl, size_t hash, + size_t capacity) { + return probe_seq(H1(hash, ctrl), capacity); +} + +// Probes the raw_hash_set with the probe sequence for hash and returns the +// pointer to the first empty or deleted slot. +// NOTE: this function must work with tables having both ctrl_t::kEmpty and +// ctrl_t::kDeleted in one group. Such tables appears during +// drop_deletes_without_resize. +// +// This function is very useful when insertions happen and: +// - the input is already a set +// - there are enough slots +// - the element with the hash is not in the table +template +inline FindInfo find_first_non_full(const ctrl_t* ctrl, size_t hash, + size_t capacity) { + auto seq = probe(ctrl, hash, capacity); + while (true) { + Group g{ctrl + seq.offset()}; + auto mask = g.MatchEmptyOrDeleted(); + if (mask) { +#if !defined(NDEBUG) + // We want to add entropy even when ASLR is not enabled. + // In debug build we will randomly insert in either the front or back of + // the group. + // TODO(kfm,sbenza): revisit after we do unconditional mixing + if (!is_small(capacity) && ShouldInsertBackwards(hash, ctrl)) { + return {seq.offset(mask.HighestBitSet()), seq.index()}; + } +#endif + return {seq.offset(mask.LowestBitSet()), seq.index()}; + } + seq.next(); + assert(seq.index() <= capacity && "full table!"); + } +} + +// Extern template for inline function keep possibility of inlining. +// When compiler decided to not inline, no symbols will be added to the +// corresponding translation unit. +extern template FindInfo find_first_non_full(const ctrl_t*, size_t, size_t); + +// Reset all ctrl bytes back to ctrl_t::kEmpty, except the sentinel. +inline void ResetCtrl(size_t capacity, ctrl_t* ctrl, const void* slot, + size_t slot_size) { + std::memset(ctrl, static_cast(ctrl_t::kEmpty), + capacity + 1 + NumClonedBytes()); + ctrl[capacity] = ctrl_t::kSentinel; + SanitizerPoisonMemoryRegion(slot, slot_size * capacity); +} + +// Sets the control byte, and if `i < NumClonedBytes()`, set the cloned byte +// at the end too. 
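// Illustrative sketch of the probe sequence that find_first_non_full() walks
// (assumed values: SSE2 group width 16 and capacity 63; both are examples,
// not taken from this file). The stride grows by one group width per step,
// and because capacity + 1 is a power of two the group windows cover every
// slot within (capacity + 1) / 16 probes.
#include <cstddef>
#include <cstdio>

int main() {
  const std::size_t capacity = 63;     // 2^6 - 1, a valid capacity
  const std::size_t width = 16;        // Group::kWidth on SSE2 builds
  std::size_t offset = 25 & capacity;  // 25 stands in for H1(hash) & capacity
  std::size_t index = 0;
  while (index <= capacity) {
    std::printf("group window starts at slot %zu\n", offset);  // 25, 41, 9, 57
    index += width;                    // triangular stride: +16, +32, +48, ...
    offset = (offset + index) & capacity;
  }
  return 0;
}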
+inline void SetCtrl(size_t i, ctrl_t h, size_t capacity, ctrl_t* ctrl, + const void* slot, size_t slot_size) { + assert(i < capacity); + + auto* slot_i = static_cast(slot) + i * slot_size; + if (IsFull(h)) { + SanitizerUnpoisonMemoryRegion(slot_i, slot_size); + } else { + SanitizerPoisonMemoryRegion(slot_i, slot_size); + } + + ctrl[i] = h; + ctrl[((i - NumClonedBytes()) & capacity) + (NumClonedBytes() & capacity)] = h; +} + +inline void SetCtrl(size_t i, h2_t h, size_t capacity, ctrl_t* ctrl, + const void* slot, size_t slot_size) { + SetCtrl(i, static_cast(h), capacity, ctrl, slot, slot_size); +} + +// The allocated block consists of `capacity + 1 + NumClonedBytes()` control +// bytes followed by `capacity` slots, which must be aligned to `slot_align`. +// SlotOffset returns the offset of the slots into the allocated block. +inline size_t SlotOffset(size_t capacity, size_t slot_align) { + assert(IsValidCapacity(capacity)); + const size_t num_control_bytes = capacity + 1 + NumClonedBytes(); + return (num_control_bytes + slot_align - 1) & (~slot_align + 1); +} + +// Returns the size of the allocated block. See also above comment. +inline size_t AllocSize(size_t capacity, size_t slot_size, size_t slot_align) { + return SlotOffset(capacity, slot_align) + capacity * slot_size; +} + // Policy: a policy defines how to perform different operations on // the slots of the hashtable (see hash_policy_traits.h for the full interface // of policy). @@ -511,7 +674,8 @@ inline size_t GrowthToLowerboundCapacity(size_t growth) { // if they are equal, false if they are not. If two keys compare equal, then // their hash values as defined by Hash MUST be equal. // -// Allocator: an Allocator [https://devdocs.io/cpp/concept/allocator] with which +// Allocator: an Allocator +// [https://en.cppreference.com/w/cpp/named_req/Allocator] with which // the storage of the hashtable will be allocated and the elements will be // constructed and destroyed. template @@ -552,13 +716,6 @@ class raw_hash_set { auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k)); auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k)); - using Layout = absl::container_internal::Layout; - - static Layout MakeLayout(size_t capacity) { - assert(IsValidCapacity(capacity)); - return Layout(capacity + Group::kWidth + 1, capacity); - } - using AllocTraits = absl::allocator_traits; using SlotAlloc = typename absl::allocator_traits< allocator_type>::template rebind_alloc; @@ -617,7 +774,7 @@ class raw_hash_set { // PRECONDITION: not an end() iterator. reference operator*() const { - assert_is_full(); + AssertIsFull(ctrl_); return PolicyTraits::element(slot_); } @@ -626,7 +783,7 @@ class raw_hash_set { // PRECONDITION: not an end() iterator. 
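// Worked example of the cloned-byte index arithmetic in SetCtrl() above
// (assumed values: Group width 16, so NumClonedBytes() == 15; capacity 63 is
// an arbitrary valid capacity). The first 15 control bytes are mirrored just
// past the sentinel at ctrl[capacity] so that a group load which wraps off
// the end of the table still sees them; every other index maps to itself.
#include <cassert>
#include <cstddef>

int main() {
  const std::size_t kCloned = 15;   // Group::kWidth - 1
  const std::size_t capacity = 63;  // 2^6 - 1
  for (std::size_t i = 0; i < capacity; ++i) {
    const std::size_t mirror =
        ((i - kCloned) & capacity) + (kCloned & capacity);
    if (i < kCloned) {
      assert(mirror == capacity + 1 + i);  // cloned copy after the sentinel
    } else {
      assert(mirror == i);                 // plain rewrite of the same byte
    }
  }
  return 0;
}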
iterator& operator++() { - assert_is_full(); + AssertIsFull(ctrl_); ++ctrl_; ++slot_; skip_empty_or_deleted(); @@ -640,8 +797,8 @@ class raw_hash_set { } friend bool operator==(const iterator& a, const iterator& b) { - a.assert_is_valid(); - b.assert_is_valid(); + AssertIsValid(a.ctrl_); + AssertIsValid(b.ctrl_); return a.ctrl_ == b.ctrl_; } friend bool operator!=(const iterator& a, const iterator& b) { @@ -649,24 +806,19 @@ class raw_hash_set { } private: - iterator(ctrl_t* ctrl) : ctrl_(ctrl) {} // for end() - iterator(ctrl_t* ctrl, slot_type* slot) : ctrl_(ctrl), slot_(slot) {} - - void assert_is_full() const { ABSL_HARDENING_ASSERT(IsFull(*ctrl_)); } - void assert_is_valid() const { - ABSL_HARDENING_ASSERT(!ctrl_ || IsFull(*ctrl_) || *ctrl_ == kSentinel); + iterator(ctrl_t* ctrl, slot_type* slot) : ctrl_(ctrl), slot_(slot) { + // This assumption helps the compiler know that any non-end iterator is + // not equal to any end iterator. + ABSL_INTERNAL_ASSUME(ctrl != nullptr); } void skip_empty_or_deleted() { while (IsEmptyOrDeleted(*ctrl_)) { - // ctrl is not necessarily aligned to Group::kWidth. It is also likely - // to read past the space for ctrl bytes and into slots. This is ok - // because ctrl has sizeof() == 1 and slot has sizeof() >= 1 so there - // is no way to read outside the combined slot array. uint32_t shift = Group{ctrl_}.CountLeadingEmptyOrDeleted(); ctrl_ += shift; slot_ += shift; } + if (ABSL_PREDICT_FALSE(*ctrl_ == ctrl_t::kSentinel)) ctrl_ = nullptr; } ctrl_t* ctrl_ = nullptr; @@ -725,10 +877,10 @@ class raw_hash_set { explicit raw_hash_set(size_t bucket_count, const hasher& hash = hasher(), const key_equal& eq = key_equal(), const allocator_type& alloc = allocator_type()) - : ctrl_(EmptyGroup()), settings_(0, hash, eq, alloc) { + : ctrl_(EmptyGroup()), + settings_(0, HashtablezInfoHandle(), hash, eq, alloc) { if (bucket_count) { capacity_ = NormalizeCapacity(bucket_count); - reset_growth_left(); initialize_slots(); } } @@ -747,7 +899,8 @@ class raw_hash_set { raw_hash_set(InputIter first, InputIter last, size_t bucket_count = 0, const hasher& hash = hasher(), const key_equal& eq = key_equal(), const allocator_type& alloc = allocator_type()) - : raw_hash_set(bucket_count, hash, eq, alloc) { + : raw_hash_set(SelectBucketCountForIterRange(first, last, bucket_count), + hash, eq, alloc) { insert(first, last); } @@ -834,10 +987,11 @@ class raw_hash_set { // than a full `insert`. for (const auto& v : that) { const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v); - auto target = find_first_non_full(hash); - set_ctrl(target.offset, H2(hash)); + auto target = find_first_non_full(ctrl_, hash, capacity_); + SetCtrl(target.offset, H2(hash), capacity_, ctrl_, slots_, + sizeof(slot_type)); emplace_at(target.offset, v); - infoz_.RecordInsert(hash, target.probe_length); + infoz().RecordInsert(hash, target.probe_length); } size_ = that.size(); growth_left() -= that.size(); @@ -851,28 +1005,27 @@ class raw_hash_set { slots_(absl::exchange(that.slots_, nullptr)), size_(absl::exchange(that.size_, 0)), capacity_(absl::exchange(that.capacity_, 0)), - infoz_(absl::exchange(that.infoz_, HashtablezInfoHandle())), // Hash, equality and allocator are copied instead of moved because // `that` must be left valid. If Hash is std::function, moving it // would create a nullptr functor that cannot be called. - settings_(that.settings_) { - // growth_left was copied above, reset the one from `that`. 
- that.growth_left() = 0; - } + settings_(absl::exchange(that.growth_left(), 0), + absl::exchange(that.infoz(), HashtablezInfoHandle()), + that.hash_ref(), that.eq_ref(), that.alloc_ref()) {} raw_hash_set(raw_hash_set&& that, const allocator_type& a) : ctrl_(EmptyGroup()), slots_(nullptr), size_(0), capacity_(0), - settings_(0, that.hash_ref(), that.eq_ref(), a) { + settings_(0, HashtablezInfoHandle(), that.hash_ref(), that.eq_ref(), + a) { if (a == that.alloc_ref()) { std::swap(ctrl_, that.ctrl_); std::swap(slots_, that.slots_); std::swap(size_, that.size_); std::swap(capacity_, that.capacity_); std::swap(growth_left(), that.growth_left()); - std::swap(infoz_, that.infoz_); + std::swap(infoz(), that.infoz()); } else { reserve(that.size()); // Note: this will copy elements of dense_set and unordered_set instead of @@ -908,12 +1061,12 @@ class raw_hash_set { it.skip_empty_or_deleted(); return it; } - iterator end() { return {ctrl_ + capacity_}; } + iterator end() { return {}; } const_iterator begin() const { return const_cast(this)->begin(); } - const_iterator end() const { return const_cast(this)->end(); } + const_iterator end() const { return {}; } const_iterator cbegin() const { return begin(); } const_iterator cend() const { return end(); } @@ -932,6 +1085,8 @@ class raw_hash_set { // past that we simply deallocate the array. if (capacity_ > 127) { destroy_slots(); + + infoz().RecordClearedReservation(); } else if (capacity_) { for (size_t i = 0; i != capacity_; ++i) { if (IsFull(ctrl_[i])) { @@ -939,11 +1094,11 @@ class raw_hash_set { } } size_ = 0; - reset_ctrl(); + ResetCtrl(capacity_, ctrl_, slots_, sizeof(slot_type)); reset_growth_left(); } assert(empty()); - infoz_.RecordStorageChanged(0, capacity_); + infoz().RecordStorageChanged(0, capacity_); } // This overload kicks in when the argument is an rvalue of insertable and @@ -1016,7 +1171,7 @@ class raw_hash_set { template void insert(InputIt first, InputIt last) { - for (; first != last; ++first) insert(*first); + for (; first != last; ++first) emplace(*first); } template = 0, RequiresInsertable = 0> @@ -1043,7 +1198,9 @@ class raw_hash_set { } iterator insert(const_iterator, node_type&& node) { - return insert(std::move(node)).first; + auto res = insert(std::move(node)); + node = std::move(res.node); + return res.position; } // This overload kicks in if we can deduce the key from args. This enables us @@ -1172,7 +1329,7 @@ class raw_hash_set { // This overload is necessary because otherwise erase(const K&) would be // a better match if non-const iterator is passed as an argument. 
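// Usage sketch of the node-handle behavior that the hinted insert above now
// follows, shown with std::unordered_set (a standard container with the same
// node API) as an analogy; this is not code from this patch. On a failed
// insert the handle keeps ownership of the element instead of dropping it.
#include <cassert>
#include <unordered_set>

int main() {
  std::unordered_set<int> a{1, 2, 3};
  std::unordered_set<int> b{2};
  auto nh = a.extract(2);                        // a == {1, 3}, nh owns 2
  auto it = b.insert(b.begin(), std::move(nh));  // fails: 2 is already in b
  assert(*it == 2);
  assert(!nh.empty());                           // the node is handed back
  it = a.insert(a.begin(), std::move(nh));       // succeeds: 2 re-enters a
  assert(nh.empty());
  return 0;
}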
void erase(iterator it) { - it.assert_is_full(); + AssertIsFull(it.ctrl_); PolicyTraits::destroy(&alloc_ref(), it.slot_); erase_meta_only(it); } @@ -1206,7 +1363,7 @@ class raw_hash_set { } node_type extract(const_iterator position) { - position.inner_.assert_is_full(); + AssertIsFull(position.inner_.ctrl_); auto node = CommonAccess::Transfer(alloc_ref(), position.inner_.slot_); erase_meta_only(position); @@ -1223,8 +1380,8 @@ class raw_hash_set { void swap(raw_hash_set& that) noexcept( IsNoThrowSwappable() && IsNoThrowSwappable() && - (!AllocTraits::propagate_on_container_swap::value || - IsNoThrowSwappable())) { + IsNoThrowSwappable( + typename AllocTraits::propagate_on_container_swap{})) { using std::swap; swap(ctrl_, that.ctrl_); swap(slots_, that.slots_); @@ -1233,32 +1390,43 @@ class raw_hash_set { swap(growth_left(), that.growth_left()); swap(hash_ref(), that.hash_ref()); swap(eq_ref(), that.eq_ref()); - swap(infoz_, that.infoz_); - if (AllocTraits::propagate_on_container_swap::value) { - swap(alloc_ref(), that.alloc_ref()); - } else { - // If the allocators do not compare equal it is officially undefined - // behavior. We choose to do nothing. - } + swap(infoz(), that.infoz()); + SwapAlloc(alloc_ref(), that.alloc_ref(), + typename AllocTraits::propagate_on_container_swap{}); } void rehash(size_t n) { if (n == 0 && capacity_ == 0) return; if (n == 0 && size_ == 0) { destroy_slots(); - infoz_.RecordStorageChanged(0, 0); + infoz().RecordStorageChanged(0, 0); + infoz().RecordClearedReservation(); return; } + // bitor is a faster way of doing `max` here. We will round up to the next // power-of-2-minus-1, so bitor is good enough. auto m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size())); // n == 0 unconditionally rehashes as per the standard. if (n == 0 || m > capacity_) { resize(m); + + // This is after resize, to ensure that we have completed the allocation + // and have potentially sampled the hashtable. + infoz().RecordReservation(n); } } - void reserve(size_t n) { rehash(GrowthToLowerboundCapacity(n)); } + void reserve(size_t n) { + if (n > size() + growth_left()) { + size_t m = GrowthToLowerboundCapacity(n); + resize(NormalizeCapacity(m)); + + // This is after resize, to ensure that we have completed the allocation + // and have potentially sampled the hashtable. + infoz().RecordReservation(n); + } + } // Extension API: support for heterogeneous keys. // @@ -1283,7 +1451,8 @@ class raw_hash_set { void prefetch(const key_arg& key) const { (void)key; #if defined(__GNUC__) - auto seq = probe(hash_ref()(key)); + prefetch_heap_block(); + auto seq = probe(ctrl_, hash_ref()(key), capacity_); __builtin_prefetch(static_cast(ctrl_ + seq.offset())); __builtin_prefetch(static_cast(slots_ + seq.offset())); #endif // __GNUC__ @@ -1298,10 +1467,10 @@ class raw_hash_set { // called heterogeneous key support. 
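// Worked example of the reserve() arithmetic above (assumes the 7/8 maximum
// load factor and power-of-two-minus-one capacities described earlier; the
// request of 100 elements is an arbitrary example).
#include <cassert>
#include <cstddef>

int main() {
  const std::size_t n = 100;                  // elements the caller wants to hold
  const std::size_t lower = n + (n - 1) / 7;  // GrowthToLowerboundCapacity: 114
  std::size_t cap = 1;
  while (cap < lower) cap = cap * 2 + 1;      // NormalizeCapacity: 1, 3, ..., 127
  assert(lower == 114);
  assert(cap == 127);
  assert(cap - cap / 8 >= n);                 // CapacityToGrowth(127) == 112 >= 100
  return 0;
}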
template iterator find(const key_arg& key, size_t hash) { - auto seq = probe(hash); + auto seq = probe(ctrl_, hash, capacity_); while (true) { Group g{ctrl_ + seq.offset()}; - for (int i : g.Match(H2(hash))) { + for (uint32_t i : g.Match(H2(hash))) { if (ABSL_PREDICT_TRUE(PolicyTraits::apply( EqualElement{key, eq_ref()}, PolicyTraits::element(slots_ + seq.offset(i))))) @@ -1309,10 +1478,12 @@ class raw_hash_set { } if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return end(); seq.next(); + assert(seq.index() <= capacity_ && "full table!"); } } template iterator find(const key_arg& key) { + prefetch_heap_block(); return find(key, hash_ref()(key)); } @@ -1322,6 +1493,7 @@ class raw_hash_set { } template const_iterator find(const key_arg& key) const { + prefetch_heap_block(); return find(key, hash_ref()(key)); } @@ -1371,6 +1543,14 @@ class raw_hash_set { return !(a == b); } + template + friend typename std::enable_if::value, + H>::type + AbslHashValue(H h, const raw_hash_set& s) { + return H::combine(H::combine_unordered(std::move(h), s.begin(), s.end()), + s.size()); + } + friend void swap(raw_hash_set& a, raw_hash_set& b) noexcept(noexcept(a.swap(b))) { a.swap(b); @@ -1443,7 +1623,7 @@ class raw_hash_set { void erase_meta_only(const_iterator it) { assert(IsFull(*it.inner_.ctrl_) && "erasing a dangling iterator"); --size_; - const size_t index = it.inner_.ctrl_ - ctrl_; + const size_t index = static_cast(it.inner_.ctrl_ - ctrl_); const size_t index_before = (index - Group::kWidth) & capacity_; const auto empty_after = Group(it.inner_.ctrl_).MatchEmpty(); const auto empty_before = Group(ctrl_ + index_before).MatchEmpty(); @@ -1456,9 +1636,10 @@ class raw_hash_set { static_cast(empty_after.TrailingZeros() + empty_before.LeadingZeros()) < Group::kWidth; - set_ctrl(index, was_never_full ? kEmpty : kDeleted); + SetCtrl(index, was_never_full ? ctrl_t::kEmpty : ctrl_t::kDeleted, + capacity_, ctrl_, slots_, sizeof(slot_type)); growth_left() += was_never_full; - infoz_.RecordErase(); + infoz().RecordErase(); } void initialize_slots() { @@ -1475,17 +1656,18 @@ class raw_hash_set { // bound more carefully. if (std::is_same>::value && slots_ == nullptr) { - infoz_ = Sample(); + infoz() = Sample(sizeof(slot_type)); } - auto layout = MakeLayout(capacity_); - char* mem = static_cast( - Allocate(&alloc_ref(), layout.AllocSize())); - ctrl_ = reinterpret_cast(layout.template Pointer<0>(mem)); - slots_ = layout.template Pointer<1>(mem); - reset_ctrl(); + char* mem = static_cast(Allocate( + &alloc_ref(), + AllocSize(capacity_, sizeof(slot_type), alignof(slot_type)))); + ctrl_ = reinterpret_cast(mem); + slots_ = reinterpret_cast( + mem + SlotOffset(capacity_, alignof(slot_type))); + ResetCtrl(capacity_, ctrl_, slots_, sizeof(slot_type)); reset_growth_left(); - infoz_.RecordStorageChanged(size_, capacity_); + infoz().RecordStorageChanged(size_, capacity_); } void destroy_slots() { @@ -1495,10 +1677,12 @@ class raw_hash_set { PolicyTraits::destroy(&alloc_ref(), slots_ + i); } } - auto layout = MakeLayout(capacity_); + // Unpoison before returning the memory to the allocator. 
SanitizerUnpoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_); - Deallocate(&alloc_ref(), ctrl_, layout.AllocSize()); + Deallocate( + &alloc_ref(), ctrl_, + AllocSize(capacity_, sizeof(slot_type), alignof(slot_type))); ctrl_ = EmptyGroup(); slots_ = nullptr; size_ = 0; @@ -1519,26 +1703,26 @@ class raw_hash_set { if (IsFull(old_ctrl[i])) { size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, PolicyTraits::element(old_slots + i)); - auto target = find_first_non_full(hash); + auto target = find_first_non_full(ctrl_, hash, capacity_); size_t new_i = target.offset; total_probe_length += target.probe_length; - set_ctrl(new_i, H2(hash)); + SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type)); PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, old_slots + i); } } if (old_capacity) { SanitizerUnpoisonMemoryRegion(old_slots, sizeof(slot_type) * old_capacity); - auto layout = MakeLayout(old_capacity); - Deallocate(&alloc_ref(), old_ctrl, - layout.AllocSize()); + Deallocate( + &alloc_ref(), old_ctrl, + AllocSize(old_capacity, sizeof(slot_type), alignof(slot_type))); } - infoz_.RecordRehash(total_probe_length); + infoz().RecordRehash(total_probe_length); } void drop_deletes_without_resize() ABSL_ATTRIBUTE_NOINLINE { assert(IsValidCapacity(capacity_)); - assert(!is_small()); + assert(!is_small(capacity_)); // Algorithm: // - mark all DELETED slots as EMPTY // - mark all FULL slots as DELETED @@ -1561,34 +1745,35 @@ class raw_hash_set { slot_type* slot = reinterpret_cast(&raw); for (size_t i = 0; i != capacity_; ++i) { if (!IsDeleted(ctrl_[i])) continue; - size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, - PolicyTraits::element(slots_ + i)); - auto target = find_first_non_full(hash); - size_t new_i = target.offset; + const size_t hash = PolicyTraits::apply( + HashElement{hash_ref()}, PolicyTraits::element(slots_ + i)); + const FindInfo target = find_first_non_full(ctrl_, hash, capacity_); + const size_t new_i = target.offset; total_probe_length += target.probe_length; // Verify if the old and new i fall within the same group wrt the hash. // If they do, we don't need to move the object as it falls already in the // best probe we can. - const auto probe_index = [&](size_t pos) { - return ((pos - probe(hash).offset()) & capacity_) / Group::kWidth; + const size_t probe_offset = probe(ctrl_, hash, capacity_).offset(); + const auto probe_index = [probe_offset, this](size_t pos) { + return ((pos - probe_offset) & capacity_) / Group::kWidth; }; // Element doesn't move. if (ABSL_PREDICT_TRUE(probe_index(new_i) == probe_index(i))) { - set_ctrl(i, H2(hash)); + SetCtrl(i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type)); continue; } if (IsEmpty(ctrl_[new_i])) { // Transfer element to the empty spot. - // set_ctrl poisons/unpoisons the slots so we have to call it at the + // SetCtrl poisons/unpoisons the slots so we have to call it at the // right time. - set_ctrl(new_i, H2(hash)); + SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type)); PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slots_ + i); - set_ctrl(i, kEmpty); + SetCtrl(i, ctrl_t::kEmpty, capacity_, ctrl_, slots_, sizeof(slot_type)); } else { assert(IsDeleted(ctrl_[new_i])); - set_ctrl(new_i, H2(hash)); + SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type)); // Until we are done rehashing, DELETED marks previously FULL slots. // Swap i and new_i elements. 
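// Illustrative sketch of the control-byte remapping that precedes the
// in-place rehash (DELETED -> EMPTY, EMPTY -> EMPTY, FULL -> DELETED), using
// plain int8_t values (kEmpty is -128 as noted earlier; kDeleted being -2 and
// full bytes being >= 0 is the encoding used by the abseil sources). The
// concrete byte values in main() are arbitrary examples.
#include <cassert>
#include <cstdint>

std::int8_t convert_special_to_empty_and_full_to_deleted(std::int8_t c) {
  if (c >= 0) return -2;  // FULL -> kDeleted: must be re-placed by the rehash
  return -128;            // kEmpty and kDeleted both become kEmpty
}

int main() {
  assert(convert_special_to_empty_and_full_to_deleted(42) == -2);
  assert(convert_special_to_empty_and_full_to_deleted(-2) == -128);
  assert(convert_special_to_empty_and_full_to_deleted(-128) == -128);
  return 0;
}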
PolicyTraits::transfer(&alloc_ref(), slot, slots_ + i); @@ -1598,14 +1783,56 @@ class raw_hash_set { } } reset_growth_left(); - infoz_.RecordRehash(total_probe_length); + infoz().RecordRehash(total_probe_length); } void rehash_and_grow_if_necessary() { if (capacity_ == 0) { resize(1); - } else if (size() <= CapacityToGrowth(capacity()) / 2) { + } else if (capacity_ > Group::kWidth && + // Do these calcuations in 64-bit to avoid overflow. + size() * uint64_t{32} <= capacity_ * uint64_t{25}) { // Squash DELETED without growing if there is enough capacity. + // + // Rehash in place if the current size is <= 25/32 of capacity_. + // Rationale for such a high factor: 1) drop_deletes_without_resize() is + // faster than resize, and 2) it takes quite a bit of work to add + // tombstones. In the worst case, seems to take approximately 4 + // insert/erase pairs to create a single tombstone and so if we are + // rehashing because of tombstones, we can afford to rehash-in-place as + // long as we are reclaiming at least 1/8 the capacity without doing more + // than 2X the work. (Where "work" is defined to be size() for rehashing + // or rehashing in place, and 1 for an insert or erase.) But rehashing in + // place is faster per operation than inserting or even doubling the size + // of the table, so we actually afford to reclaim even less space from a + // resize-in-place. The decision is to rehash in place if we can reclaim + // at about 1/8th of the usable capacity (specifically 3/28 of the + // capacity) which means that the total cost of rehashing will be a small + // fraction of the total work. + // + // Here is output of an experiment using the BM_CacheInSteadyState + // benchmark running the old case (where we rehash-in-place only if we can + // reclaim at least 7/16*capacity_) vs. this code (which rehashes in place + // if we can recover 3/32*capacity_). + // + // Note that although in the worst-case number of rehashes jumped up from + // 15 to 190, but the number of operations per second is almost the same. + // + // Abridged output of running BM_CacheInSteadyState benchmark from + // raw_hash_set_benchmark. N is the number of insert/erase operations. + // + // | OLD (recover >= 7/16 | NEW (recover >= 3/32) + // size | N/s LoadFactor NRehashes | N/s LoadFactor NRehashes + // 448 | 145284 0.44 18 | 140118 0.44 19 + // 493 | 152546 0.24 11 | 151417 0.48 28 + // 538 | 151439 0.26 11 | 151152 0.53 38 + // 583 | 151765 0.28 11 | 150572 0.57 50 + // 628 | 150241 0.31 11 | 150853 0.61 66 + // 672 | 149602 0.33 12 | 150110 0.66 90 + // 717 | 149998 0.35 12 | 149531 0.70 129 + // 762 | 149836 0.37 13 | 148559 0.74 190 + // 807 | 149736 0.39 14 | 151107 0.39 14 + // 852 | 150204 0.42 15 | 151019 0.42 15 drop_deletes_without_resize(); } else { // Otherwise grow the container. 
@@ -1615,56 +1842,21 @@ class raw_hash_set { bool has_element(const value_type& elem) const { size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, elem); - auto seq = probe(hash); + auto seq = probe(ctrl_, hash, capacity_); while (true) { Group g{ctrl_ + seq.offset()}; - for (int i : g.Match(H2(hash))) { + for (uint32_t i : g.Match(H2(hash))) { if (ABSL_PREDICT_TRUE(PolicyTraits::element(slots_ + seq.offset(i)) == elem)) return true; } if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return false; seq.next(); - assert(seq.index() < capacity_ && "full table!"); + assert(seq.index() <= capacity_ && "full table!"); } return false; } - // Probes the raw_hash_set with the probe sequence for hash and returns the - // pointer to the first empty or deleted slot. - // NOTE: this function must work with tables having both kEmpty and kDelete - // in one group. Such tables appears during drop_deletes_without_resize. - // - // This function is very useful when insertions happen and: - // - the input is already a set - // - there are enough slots - // - the element with the hash is not in the table - struct FindInfo { - size_t offset; - size_t probe_length; - }; - FindInfo find_first_non_full(size_t hash) { - auto seq = probe(hash); - while (true) { - Group g{ctrl_ + seq.offset()}; - auto mask = g.MatchEmptyOrDeleted(); - if (mask) { -#if !defined(NDEBUG) - // We want to add entropy even when ASLR is not enabled. - // In debug build we will randomly insert in either the front or back of - // the group. - // TODO(kfm,sbenza): revisit after we do unconditional mixing - if (!is_small() && ShouldInsertBackwards(hash, ctrl_)) { - return {seq.offset(mask.HighestBitSet()), seq.index()}; - } -#endif - return {seq.offset(mask.LowestBitSet()), seq.index()}; - } - assert(seq.index() < capacity_ && "full table!"); - seq.next(); - } - } - // TODO(alkis): Optimize this assuming *this and that don't overlap. 
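// Worked example of the rehash_and_grow_if_necessary() heuristic above
// (capacity 1023 is an arbitrary example larger than Group::kWidth): the
// table squashes tombstones in place only while size() * 32 <= capacity * 25,
// i.e. up to 799 full slots here; past that it grows the container instead.
#include <cassert>
#include <cstdint>

int main() {
  const std::uint64_t capacity = 1023;    // 2^10 - 1
  std::uint64_t size = 799;
  assert(size * 32 <= capacity * 25);     // rehash in place (drop deletes)
  size = 800;
  assert(!(size * 32 <= capacity * 25));  // otherwise grow the container
  return 0;
}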
raw_hash_set& move_assign(raw_hash_set&& that, std::true_type) { raw_hash_set tmp(std::move(that)); @@ -1680,11 +1872,12 @@ class raw_hash_set { protected: template std::pair find_or_prepare_insert(const K& key) { + prefetch_heap_block(); auto hash = hash_ref()(key); - auto seq = probe(hash); + auto seq = probe(ctrl_, hash, capacity_); while (true) { Group g{ctrl_ + seq.offset()}; - for (int i : g.Match(H2(hash))) { + for (uint32_t i : g.Match(H2(hash))) { if (ABSL_PREDICT_TRUE(PolicyTraits::apply( EqualElement{key, eq_ref()}, PolicyTraits::element(slots_ + seq.offset(i))))) @@ -1692,21 +1885,23 @@ class raw_hash_set { } if (ABSL_PREDICT_TRUE(g.MatchEmpty())) break; seq.next(); + assert(seq.index() <= capacity_ && "full table!"); } return {prepare_insert(hash), true}; } size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE { - auto target = find_first_non_full(hash); + auto target = find_first_non_full(ctrl_, hash, capacity_); if (ABSL_PREDICT_FALSE(growth_left() == 0 && !IsDeleted(ctrl_[target.offset]))) { rehash_and_grow_if_necessary(); - target = find_first_non_full(hash); + target = find_first_non_full(ctrl_, hash, capacity_); } ++size_; growth_left() -= IsEmpty(ctrl_[target.offset]); - set_ctrl(target.offset, H2(hash)); - infoz_.RecordInsert(hash, target.probe_length); + SetCtrl(target.offset, H2(hash), capacity_, ctrl_, slots_, + sizeof(slot_type)); + infoz().RecordInsert(hash, target.probe_length); return target.offset; } @@ -1734,86 +1929,59 @@ class raw_hash_set { private: friend struct RawHashSetTestOnlyAccess; - probe_seq probe(size_t hash) const { - return probe_seq(H1(hash, ctrl_), capacity_); - } - - // Reset all ctrl bytes back to kEmpty, except the sentinel. - void reset_ctrl() { - std::memset(ctrl_, kEmpty, capacity_ + Group::kWidth); - ctrl_[capacity_] = kSentinel; - SanitizerPoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_); - } - void reset_growth_left() { growth_left() = CapacityToGrowth(capacity()) - size_; } - // Sets the control byte, and if `i < Group::kWidth`, set the cloned byte at - // the end too. - void set_ctrl(size_t i, ctrl_t h) { - assert(i < capacity_); - - if (IsFull(h)) { - SanitizerUnpoisonObject(slots_ + i); - } else { - SanitizerPoisonObject(slots_ + i); - } - - ctrl_[i] = h; - ctrl_[((i - Group::kWidth) & capacity_) + 1 + - ((Group::kWidth - 1) & capacity_)] = h; - } - size_t& growth_left() { return settings_.template get<0>(); } - // The representation of the object has two modes: - // - small: For capacities < kWidth-1 - // - large: For the rest. - // - // Differences: - // - In small mode we are able to use the whole capacity. The extra control - // bytes give us at least one "empty" control byte to stop the iteration. - // This is important to make 1 a valid capacity. - // - // - In small mode only the first `capacity()` control bytes after the - // sentinel are valid. The rest contain dummy kEmpty values that do not - // represent a real slot. This is important to take into account on - // find_first_non_full(), where we never try ShouldInsertBackwards() for - // small tables. - bool is_small() const { return capacity_ < Group::kWidth - 1; } + void prefetch_heap_block() const { + // Prefetch the heap-allocated memory region to resolve potential TLB + // misses. This is intended to overlap with execution of calculating the + // hash for a key. 
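// Usage sketch of the erase-count behavior that EraseIf() above now provides,
// shown with C++20's std::erase_if on a standard container as an analogy
// (this is not code from this patch).
#include <cassert>
#include <unordered_set>

int main() {
  std::unordered_set<int> s{1, 2, 3, 4, 5};
  const auto erased = std::erase_if(s, [](int v) { return v % 2 == 0; });
  assert(erased == 2);    // removed 2 and 4
  assert(s.size() == 3);
  return 0;
}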
+#if defined(__GNUC__) + __builtin_prefetch(static_cast(ctrl_), 0, 1); +#endif // __GNUC__ + } - hasher& hash_ref() { return settings_.template get<1>(); } - const hasher& hash_ref() const { return settings_.template get<1>(); } - key_equal& eq_ref() { return settings_.template get<2>(); } - const key_equal& eq_ref() const { return settings_.template get<2>(); } - allocator_type& alloc_ref() { return settings_.template get<3>(); } + HashtablezInfoHandle& infoz() { return settings_.template get<1>(); } + + hasher& hash_ref() { return settings_.template get<2>(); } + const hasher& hash_ref() const { return settings_.template get<2>(); } + key_equal& eq_ref() { return settings_.template get<3>(); } + const key_equal& eq_ref() const { return settings_.template get<3>(); } + allocator_type& alloc_ref() { return settings_.template get<4>(); } const allocator_type& alloc_ref() const { - return settings_.template get<3>(); + return settings_.template get<4>(); } // TODO(alkis): Investigate removing some of these fields: // - ctrl/slots can be derived from each other // - size can be moved into the slot array - ctrl_t* ctrl_ = EmptyGroup(); // [(capacity + 1) * ctrl_t] - slot_type* slots_ = nullptr; // [capacity * slot_type] - size_t size_ = 0; // number of full slots - size_t capacity_ = 0; // total number of slots - HashtablezInfoHandle infoz_; - absl::container_internal::CompressedTuple - settings_{0, hasher{}, key_equal{}, allocator_type{}}; + settings_{0u, HashtablezInfoHandle{}, hasher{}, key_equal{}, + allocator_type{}}; }; // Erases all elements that satisfy the predicate `pred` from the container `c`. template -void EraseIf(Predicate pred, raw_hash_set* c) { +typename raw_hash_set::size_type EraseIf( + Predicate& pred, raw_hash_set* c) { + const auto initial_size = c->size(); for (auto it = c->begin(), last = c->end(); it != last;) { - auto copy_it = it++; - if (pred(*copy_it)) { - c->erase(copy_it); + if (pred(*it)) { + c->erase(it++); + } else { + ++it; } } + return initial_size - c->size(); } namespace hashtable_debug_internal { @@ -1826,10 +1994,10 @@ struct HashtableDebugAccess> { const typename Set::key_type& key) { size_t num_probes = 0; size_t hash = set.hash_ref()(key); - auto seq = set.probe(hash); + auto seq = probe(set.ctrl_, hash, set.capacity_); while (true) { container_internal::Group g{set.ctrl_ + seq.offset()}; - for (int i : g.Match(container_internal::H2(hash))) { + for (uint32_t i : g.Match(container_internal::H2(hash))) { if (Traits::apply( typename Set::template EqualElement{ key, set.eq_ref()}, @@ -1846,8 +2014,7 @@ struct HashtableDebugAccess> { static size_t AllocatedByteSize(const Set& c) { size_t capacity = c.capacity_; if (capacity == 0) return 0; - auto layout = Set::MakeLayout(capacity); - size_t m = layout.AllocSize(); + size_t m = AllocSize(capacity, sizeof(Slot), alignof(Slot)); size_t per_slot = Traits::space_used(static_cast(nullptr)); if (per_slot != ~size_t{}) { @@ -1865,8 +2032,8 @@ struct HashtableDebugAccess> { static size_t LowerBoundAllocatedByteSize(size_t size) { size_t capacity = GrowthToLowerboundCapacity(size); if (capacity == 0) return 0; - auto layout = Set::MakeLayout(NormalizeCapacity(capacity)); - size_t m = layout.AllocSize(); + size_t m = + AllocSize(NormalizeCapacity(capacity), sizeof(Slot), alignof(Slot)); size_t per_slot = Traits::space_used(static_cast(nullptr)); if (per_slot != ~size_t{}) { m += per_slot * size; diff --git a/TMessagesProj/jni/voip/webrtc/absl/container/internal/raw_hash_set_allocator_test.cc 
b/TMessagesProj/jni/voip/webrtc/absl/container/internal/raw_hash_set_allocator_test.cc new file mode 100644 index 000000000..e73f53fd6 --- /dev/null +++ b/TMessagesProj/jni/voip/webrtc/absl/container/internal/raw_hash_set_allocator_test.cc @@ -0,0 +1,505 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include + +#include "gtest/gtest.h" +#include "absl/container/internal/raw_hash_set.h" +#include "absl/container/internal/tracked.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +namespace { + +enum AllocSpec { + kPropagateOnCopy = 1, + kPropagateOnMove = 2, + kPropagateOnSwap = 4, +}; + +struct AllocState { + size_t num_allocs = 0; + std::set owned; +}; + +template +class CheckedAlloc { + public: + template + friend class CheckedAlloc; + + using value_type = T; + + CheckedAlloc() {} + explicit CheckedAlloc(size_t id) : id_(id) {} + CheckedAlloc(const CheckedAlloc&) = default; + CheckedAlloc& operator=(const CheckedAlloc&) = default; + + template + CheckedAlloc(const CheckedAlloc& that) + : id_(that.id_), state_(that.state_) {} + + template + struct rebind { + using other = CheckedAlloc; + }; + + using propagate_on_container_copy_assignment = + std::integral_constant; + + using propagate_on_container_move_assignment = + std::integral_constant; + + using propagate_on_container_swap = + std::integral_constant; + + CheckedAlloc select_on_container_copy_construction() const { + if (Spec & kPropagateOnCopy) return *this; + return {}; + } + + T* allocate(size_t n) { + T* ptr = std::allocator().allocate(n); + track_alloc(ptr); + return ptr; + } + void deallocate(T* ptr, size_t n) { + memset(ptr, 0, n * sizeof(T)); // The freed memory must be unpoisoned. 
+ track_dealloc(ptr); + return std::allocator().deallocate(ptr, n); + } + + friend bool operator==(const CheckedAlloc& a, const CheckedAlloc& b) { + return a.id_ == b.id_; + } + friend bool operator!=(const CheckedAlloc& a, const CheckedAlloc& b) { + return !(a == b); + } + + size_t num_allocs() const { return state_->num_allocs; } + + void swap(CheckedAlloc& that) { + using std::swap; + swap(id_, that.id_); + swap(state_, that.state_); + } + + friend void swap(CheckedAlloc& a, CheckedAlloc& b) { a.swap(b); } + + friend std::ostream& operator<<(std::ostream& o, const CheckedAlloc& a) { + return o << "alloc(" << a.id_ << ")"; + } + + private: + void track_alloc(void* ptr) { + AllocState* state = state_.get(); + ++state->num_allocs; + if (!state->owned.insert(ptr).second) + ADD_FAILURE() << *this << " got previously allocated memory: " << ptr; + } + void track_dealloc(void* ptr) { + if (state_->owned.erase(ptr) != 1) + ADD_FAILURE() << *this + << " deleting memory owned by another allocator: " << ptr; + } + + size_t id_ = std::numeric_limits::max(); + + std::shared_ptr state_ = std::make_shared(); +}; + +struct Identity { + int32_t operator()(int32_t v) const { return v; } +}; + +struct Policy { + using slot_type = Tracked; + using init_type = Tracked; + using key_type = int32_t; + + template + static void construct(allocator_type* alloc, slot_type* slot, + Args&&... args) { + std::allocator_traits::construct( + *alloc, slot, std::forward(args)...); + } + + template + static void destroy(allocator_type* alloc, slot_type* slot) { + std::allocator_traits::destroy(*alloc, slot); + } + + template + static void transfer(allocator_type* alloc, slot_type* new_slot, + slot_type* old_slot) { + construct(alloc, new_slot, std::move(*old_slot)); + destroy(alloc, old_slot); + } + + template + static auto apply(F&& f, int32_t v) -> decltype(std::forward(f)(v, v)) { + return std::forward(f)(v, v); + } + + template + static auto apply(F&& f, const slot_type& v) + -> decltype(std::forward(f)(v.val(), v)) { + return std::forward(f)(v.val(), v); + } + + template + static auto apply(F&& f, slot_type&& v) + -> decltype(std::forward(f)(v.val(), std::move(v))) { + return std::forward(f)(v.val(), std::move(v)); + } + + static slot_type& element(slot_type* slot) { return *slot; } +}; + +template +struct PropagateTest : public ::testing::Test { + using Alloc = CheckedAlloc, Spec>; + + using Table = raw_hash_set, Alloc>; + + PropagateTest() { + EXPECT_EQ(a1, t1.get_allocator()); + EXPECT_NE(a2, t1.get_allocator()); + } + + Alloc a1 = Alloc(1); + Table t1 = Table(0, a1); + Alloc a2 = Alloc(2); +}; + +using PropagateOnAll = + PropagateTest; +using NoPropagateOnCopy = PropagateTest; +using NoPropagateOnMove = PropagateTest; + +TEST_F(PropagateOnAll, Empty) { EXPECT_EQ(0, a1.num_allocs()); } + +TEST_F(PropagateOnAll, InsertAllocates) { + auto it = t1.insert(0).first; + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(PropagateOnAll, InsertDecomposes) { + auto it = t1.insert(0).first; + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); + + EXPECT_FALSE(t1.insert(0).second); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(PropagateOnAll, RehashMoves) { + auto it = t1.insert(0).first; + EXPECT_EQ(0, it->num_moves()); + t1.rehash(2 * t1.capacity()); + EXPECT_EQ(2, a1.num_allocs()); + it = t1.find(0); + EXPECT_EQ(1, it->num_moves()); + EXPECT_EQ(0, 
it->num_copies()); +} + +TEST_F(PropagateOnAll, CopyConstructor) { + auto it = t1.insert(0).first; + Table u(t1); + EXPECT_EQ(2, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(1, it->num_copies()); +} + +TEST_F(NoPropagateOnCopy, CopyConstructor) { + auto it = t1.insert(0).first; + Table u(t1); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(1, u.get_allocator().num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(1, it->num_copies()); +} + +TEST_F(PropagateOnAll, CopyConstructorWithSameAlloc) { + auto it = t1.insert(0).first; + Table u(t1, a1); + EXPECT_EQ(2, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(1, it->num_copies()); +} + +TEST_F(NoPropagateOnCopy, CopyConstructorWithSameAlloc) { + auto it = t1.insert(0).first; + Table u(t1, a1); + EXPECT_EQ(2, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(1, it->num_copies()); +} + +TEST_F(PropagateOnAll, CopyConstructorWithDifferentAlloc) { + auto it = t1.insert(0).first; + Table u(t1, a2); + EXPECT_EQ(a2, u.get_allocator()); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(1, a2.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(1, it->num_copies()); +} + +TEST_F(NoPropagateOnCopy, CopyConstructorWithDifferentAlloc) { + auto it = t1.insert(0).first; + Table u(t1, a2); + EXPECT_EQ(a2, u.get_allocator()); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(1, a2.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(1, it->num_copies()); +} + +TEST_F(PropagateOnAll, MoveConstructor) { + auto it = t1.insert(0).first; + Table u(std::move(t1)); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(NoPropagateOnMove, MoveConstructor) { + auto it = t1.insert(0).first; + Table u(std::move(t1)); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(PropagateOnAll, MoveConstructorWithSameAlloc) { + auto it = t1.insert(0).first; + Table u(std::move(t1), a1); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(NoPropagateOnMove, MoveConstructorWithSameAlloc) { + auto it = t1.insert(0).first; + Table u(std::move(t1), a1); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(PropagateOnAll, MoveConstructorWithDifferentAlloc) { + auto it = t1.insert(0).first; + Table u(std::move(t1), a2); + it = u.find(0); + EXPECT_EQ(a2, u.get_allocator()); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(1, a2.num_allocs()); + EXPECT_EQ(1, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(NoPropagateOnMove, MoveConstructorWithDifferentAlloc) { + auto it = t1.insert(0).first; + Table u(std::move(t1), a2); + it = u.find(0); + EXPECT_EQ(a2, u.get_allocator()); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(1, a2.num_allocs()); + EXPECT_EQ(1, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(PropagateOnAll, CopyAssignmentWithSameAlloc) { + auto it = t1.insert(0).first; + Table u(0, a1); + u = t1; + EXPECT_EQ(2, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(1, it->num_copies()); +} + +TEST_F(NoPropagateOnCopy, CopyAssignmentWithSameAlloc) { + auto it = t1.insert(0).first; + Table u(0, a1); + u = t1; + EXPECT_EQ(2, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(1, it->num_copies()); +} + +TEST_F(PropagateOnAll, CopyAssignmentWithDifferentAlloc) { + auto it = t1.insert(0).first; + Table u(0, a2); + u = t1; + EXPECT_EQ(a1, 
u.get_allocator()); + EXPECT_EQ(2, a1.num_allocs()); + EXPECT_EQ(0, a2.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(1, it->num_copies()); +} + +TEST_F(NoPropagateOnCopy, CopyAssignmentWithDifferentAlloc) { + auto it = t1.insert(0).first; + Table u(0, a2); + u = t1; + EXPECT_EQ(a2, u.get_allocator()); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(1, a2.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(1, it->num_copies()); +} + +TEST_F(PropagateOnAll, MoveAssignmentWithSameAlloc) { + auto it = t1.insert(0).first; + Table u(0, a1); + u = std::move(t1); + EXPECT_EQ(a1, u.get_allocator()); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(NoPropagateOnMove, MoveAssignmentWithSameAlloc) { + auto it = t1.insert(0).first; + Table u(0, a1); + u = std::move(t1); + EXPECT_EQ(a1, u.get_allocator()); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(PropagateOnAll, MoveAssignmentWithDifferentAlloc) { + auto it = t1.insert(0).first; + Table u(0, a2); + u = std::move(t1); + EXPECT_EQ(a1, u.get_allocator()); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(0, a2.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(NoPropagateOnMove, MoveAssignmentWithDifferentAlloc) { + auto it = t1.insert(0).first; + Table u(0, a2); + u = std::move(t1); + it = u.find(0); + EXPECT_EQ(a2, u.get_allocator()); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(1, a2.num_allocs()); + EXPECT_EQ(1, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +TEST_F(PropagateOnAll, Swap) { + auto it = t1.insert(0).first; + Table u(0, a2); + u.swap(t1); + EXPECT_EQ(a1, u.get_allocator()); + EXPECT_EQ(a2, t1.get_allocator()); + EXPECT_EQ(1, a1.num_allocs()); + EXPECT_EQ(0, a2.num_allocs()); + EXPECT_EQ(0, it->num_moves()); + EXPECT_EQ(0, it->num_copies()); +} + +// This allocator is similar to std::pmr::polymorphic_allocator. +// Note the disabled assignment. +template +class PAlloc { + template + friend class PAlloc; + + public: + // types + using value_type = T; + + // traits + using propagate_on_container_swap = std::false_type; + + PAlloc() noexcept = default; + explicit PAlloc(size_t id) noexcept : id_(id) {} + PAlloc(const PAlloc&) noexcept = default; + PAlloc& operator=(const PAlloc&) noexcept = delete; + + template + PAlloc(const PAlloc& that) noexcept : id_(that.id_) {} // NOLINT + + template + struct rebind { + using other = PAlloc; + }; + + constexpr PAlloc select_on_container_copy_construction() const { return {}; } + + // public member functions + T* allocate(size_t) { return new T; } + void deallocate(T* p, size_t) { delete p; } + + friend bool operator==(const PAlloc& a, const PAlloc& b) { + return a.id_ == b.id_; + } + friend bool operator!=(const PAlloc& a, const PAlloc& b) { return !(a == b); } + + private: + size_t id_ = std::numeric_limits::max(); +}; + +// This doesn't compile with GCC 5.4 and 5.5 due to a bug in noexcept handing. 
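// Background sketch for the PAlloc tests below, using std::vector and a tiny
// stateful allocator (TaggedAlloc and its id field are illustrative names,
// not from this patch): select_on_container_copy_construction() decides which
// allocator a copy-constructed container starts with, which is exactly what
// the NoPropagateOn.CopyConstruct test checks for raw_hash_set.
#include <cassert>
#include <cstddef>
#include <memory>
#include <vector>

template <class T>
struct TaggedAlloc {
  using value_type = T;
  std::size_t id = 0;
  TaggedAlloc() = default;
  explicit TaggedAlloc(std::size_t i) : id(i) {}
  template <class U>
  TaggedAlloc(const TaggedAlloc<U>& that) : id(that.id) {}  // NOLINT
  T* allocate(std::size_t n) { return std::allocator<T>{}.allocate(n); }
  void deallocate(T* p, std::size_t n) { std::allocator<T>{}.deallocate(p, n); }
  // A copy-constructed container falls back to a default allocator, like
  // PAlloc::select_on_container_copy_construction() above.
  TaggedAlloc select_on_container_copy_construction() const { return TaggedAlloc{}; }
  friend bool operator==(const TaggedAlloc& a, const TaggedAlloc& b) {
    return a.id == b.id;
  }
  friend bool operator!=(const TaggedAlloc& a, const TaggedAlloc& b) {
    return !(a == b);
  }
};

int main() {
  std::vector<int, TaggedAlloc<int>> v(TaggedAlloc<int>{1});
  v.push_back(42);
  auto copy = v;                         // consults select_on_container_copy_construction
  assert(v.get_allocator().id == 1);
  assert(copy.get_allocator().id == 0);  // the copy got the default allocator
  return 0;
}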
+#if !defined(__GNUC__) || __GNUC__ != 5 || (__GNUC_MINOR__ != 4 && \ + __GNUC_MINOR__ != 5) +TEST(NoPropagateOn, Swap) { + using PA = PAlloc; + using Table = raw_hash_set, PA>; + + Table t1(PA{1}), t2(PA{2}); + swap(t1, t2); + EXPECT_EQ(t1.get_allocator(), PA(1)); + EXPECT_EQ(t2.get_allocator(), PA(2)); +} +#endif + +TEST(NoPropagateOn, CopyConstruct) { + using PA = PAlloc; + using Table = raw_hash_set, PA>; + + Table t1(PA{1}), t2(t1); + EXPECT_EQ(t1.get_allocator(), PA(1)); + EXPECT_EQ(t2.get_allocator(), PA()); +} + +TEST(NoPropagateOn, Assignment) { + using PA = PAlloc; + using Table = raw_hash_set, PA>; + + Table t1(PA{1}), t2(PA{2}); + t1 = t2; + EXPECT_EQ(t1.get_allocator(), PA(1)); + EXPECT_EQ(t2.get_allocator(), PA(2)); +} + +} // namespace +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/TMessagesProj/jni/voip/webrtc/absl/container/internal/raw_hash_set_benchmark.cc b/TMessagesProj/jni/voip/webrtc/absl/container/internal/raw_hash_set_benchmark.cc new file mode 100644 index 000000000..146ef433c --- /dev/null +++ b/TMessagesProj/jni/voip/webrtc/absl/container/internal/raw_hash_set_benchmark.cc @@ -0,0 +1,443 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/container/internal/raw_hash_set.h" + +#include +#include + +#include "absl/base/internal/raw_logging.h" +#include "absl/container/internal/hash_function_defaults.h" +#include "absl/strings/str_format.h" +#include "benchmark/benchmark.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +struct RawHashSetTestOnlyAccess { + template + static auto GetSlots(const C& c) -> decltype(c.slots_) { + return c.slots_; + } +}; + +namespace { + +struct IntPolicy { + using slot_type = int64_t; + using key_type = int64_t; + using init_type = int64_t; + + static void construct(void*, int64_t* slot, int64_t v) { *slot = v; } + static void destroy(void*, int64_t*) {} + static void transfer(void*, int64_t* new_slot, int64_t* old_slot) { + *new_slot = *old_slot; + } + + static int64_t& element(slot_type* slot) { return *slot; } + + template + static auto apply(F&& f, int64_t x) -> decltype(std::forward(f)(x, x)) { + return std::forward(f)(x, x); + } +}; + +class StringPolicy { + template ::value>::type> + decltype(std::declval()( + std::declval(), std::piecewise_construct, + std::declval>(), + std::declval())) static apply_impl(F&& f, + std::pair, V> p) { + const absl::string_view& key = std::get<0>(p.first); + return std::forward(f)(key, std::piecewise_construct, std::move(p.first), + std::move(p.second)); + } + + public: + struct slot_type { + struct ctor {}; + + template + slot_type(ctor, Ts&&... ts) : pair(std::forward(ts)...) {} + + std::pair pair; + }; + + using key_type = std::string; + using init_type = std::pair; + + template + static void construct(allocator_type* alloc, slot_type* slot, Args... 
args) { + std::allocator_traits::construct( + *alloc, slot, typename slot_type::ctor(), std::forward(args)...); + } + + template + static void destroy(allocator_type* alloc, slot_type* slot) { + std::allocator_traits::destroy(*alloc, slot); + } + + template + static void transfer(allocator_type* alloc, slot_type* new_slot, + slot_type* old_slot) { + construct(alloc, new_slot, std::move(old_slot->pair)); + destroy(alloc, old_slot); + } + + static std::pair& element(slot_type* slot) { + return slot->pair; + } + + template + static auto apply(F&& f, Args&&... args) + -> decltype(apply_impl(std::forward(f), + PairArgs(std::forward(args)...))) { + return apply_impl(std::forward(f), + PairArgs(std::forward(args)...)); + } +}; + +struct StringHash : container_internal::hash_default_hash { + using is_transparent = void; +}; +struct StringEq : std::equal_to { + using is_transparent = void; +}; + +struct StringTable + : raw_hash_set> { + using Base = typename StringTable::raw_hash_set; + StringTable() {} + using Base::Base; +}; + +struct IntTable + : raw_hash_set, + std::equal_to, std::allocator> { + using Base = typename IntTable::raw_hash_set; + IntTable() {} + using Base::Base; +}; + +struct string_generator { + template + std::string operator()(RNG& rng) const { + std::string res; + res.resize(12); + std::uniform_int_distribution printable_ascii(0x20, 0x7E); + std::generate(res.begin(), res.end(), [&] { return printable_ascii(rng); }); + return res; + } + + size_t size; +}; + +// Model a cache in steady state. +// +// On a table of size N, keep deleting the LRU entry and add a random one. +void BM_CacheInSteadyState(benchmark::State& state) { + std::random_device rd; + std::mt19937 rng(rd()); + string_generator gen{12}; + StringTable t; + std::deque keys; + while (t.size() < state.range(0)) { + auto x = t.emplace(gen(rng), gen(rng)); + if (x.second) keys.push_back(x.first->first); + } + ABSL_RAW_CHECK(state.range(0) >= 10, ""); + while (state.KeepRunning()) { + // Some cache hits. + std::deque::const_iterator it; + for (int i = 0; i != 90; ++i) { + if (i % 10 == 0) it = keys.end(); + ::benchmark::DoNotOptimize(t.find(*--it)); + } + // Some cache misses. + for (int i = 0; i != 10; ++i) ::benchmark::DoNotOptimize(t.find(gen(rng))); + ABSL_RAW_CHECK(t.erase(keys.front()), keys.front().c_str()); + keys.pop_front(); + while (true) { + auto x = t.emplace(gen(rng), gen(rng)); + if (x.second) { + keys.push_back(x.first->first); + break; + } + } + } + state.SetItemsProcessed(state.iterations()); + state.SetLabel(absl::StrFormat("load_factor=%.2f", t.load_factor())); +} + +template +void CacheInSteadyStateArgs(Benchmark* bm) { + // The default. + const float max_load_factor = 0.875; + // When the cache is at the steady state, the probe sequence will equal + // capacity if there is no reclamation of deleted slots. Pick a number large + // enough to make the benchmark slow for that case. + const size_t capacity = 1 << 10; + + // Check N data points to cover load factors in [0.4, 0.8). 
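// Worked example of the argument generation in CacheInSteadyStateArgs()
// below: with capacity 1 << 10 and max load factor 0.875, the ten benchmark
// sizes run from 448 up to 852, matching the size column quoted in the
// rehash_and_grow_if_necessary() comment earlier in this patch.
#include <cassert>
#include <cmath>
#include <cstddef>

int main() {
  const double max_load_factor = 0.875;
  const std::size_t capacity = 1 << 10;
  const std::size_t kNumPoints = 10;
  const auto arg = [&](std::size_t i) {
    return static_cast<std::size_t>(std::ceil(
        capacity * (max_load_factor + i * max_load_factor / kNumPoints) / 2));
  };
  assert(arg(0) == 448);
  assert(arg(9) == 852);
  return 0;
}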
+ const size_t kNumPoints = 10; + for (size_t i = 0; i != kNumPoints; ++i) + bm->Arg(std::ceil( + capacity * (max_load_factor + i * max_load_factor / kNumPoints) / 2)); +} +BENCHMARK(BM_CacheInSteadyState)->Apply(CacheInSteadyStateArgs); + +void BM_EndComparison(benchmark::State& state) { + std::random_device rd; + std::mt19937 rng(rd()); + string_generator gen{12}; + StringTable t; + while (t.size() < state.range(0)) { + t.emplace(gen(rng), gen(rng)); + } + + for (auto _ : state) { + for (auto it = t.begin(); it != t.end(); ++it) { + benchmark::DoNotOptimize(it); + benchmark::DoNotOptimize(t); + benchmark::DoNotOptimize(it != t.end()); + } + } +} +BENCHMARK(BM_EndComparison)->Arg(400); + +void BM_CopyCtor(benchmark::State& state) { + std::random_device rd; + std::mt19937 rng(rd()); + IntTable t; + std::uniform_int_distribution dist(0, ~uint64_t{}); + + while (t.size() < state.range(0)) { + t.emplace(dist(rng)); + } + + for (auto _ : state) { + IntTable t2 = t; + benchmark::DoNotOptimize(t2); + } +} +BENCHMARK(BM_CopyCtor)->Range(128, 4096); + +void BM_CopyAssign(benchmark::State& state) { + std::random_device rd; + std::mt19937 rng(rd()); + IntTable t; + std::uniform_int_distribution dist(0, ~uint64_t{}); + while (t.size() < state.range(0)) { + t.emplace(dist(rng)); + } + + IntTable t2; + for (auto _ : state) { + t2 = t; + benchmark::DoNotOptimize(t2); + } +} +BENCHMARK(BM_CopyAssign)->Range(128, 4096); + +void BM_RangeCtor(benchmark::State& state) { + std::random_device rd; + std::mt19937 rng(rd()); + std::uniform_int_distribution dist(0, ~uint64_t{}); + std::vector values; + const size_t desired_size = state.range(0); + while (values.size() < desired_size) { + values.emplace_back(dist(rng)); + } + + for (auto unused : state) { + IntTable t{values.begin(), values.end()}; + benchmark::DoNotOptimize(t); + } +} +BENCHMARK(BM_RangeCtor)->Range(128, 65536); + +void BM_NoOpReserveIntTable(benchmark::State& state) { + IntTable t; + t.reserve(100000); + for (auto _ : state) { + benchmark::DoNotOptimize(t); + t.reserve(100000); + } +} +BENCHMARK(BM_NoOpReserveIntTable); + +void BM_NoOpReserveStringTable(benchmark::State& state) { + StringTable t; + t.reserve(100000); + for (auto _ : state) { + benchmark::DoNotOptimize(t); + t.reserve(100000); + } +} +BENCHMARK(BM_NoOpReserveStringTable); + +void BM_ReserveIntTable(benchmark::State& state) { + int reserve_size = state.range(0); + for (auto _ : state) { + state.PauseTiming(); + IntTable t; + state.ResumeTiming(); + benchmark::DoNotOptimize(t); + t.reserve(reserve_size); + } +} +BENCHMARK(BM_ReserveIntTable)->Range(128, 4096); + +void BM_ReserveStringTable(benchmark::State& state) { + int reserve_size = state.range(0); + for (auto _ : state) { + state.PauseTiming(); + StringTable t; + state.ResumeTiming(); + benchmark::DoNotOptimize(t); + t.reserve(reserve_size); + } +} +BENCHMARK(BM_ReserveStringTable)->Range(128, 4096); + +// Like std::iota, except that ctrl_t doesn't support operator++. 
+template +void Iota(CtrlIter begin, CtrlIter end, int value) { + for (; begin != end; ++begin, ++value) { + *begin = static_cast(value); + } +} + +void BM_Group_Match(benchmark::State& state) { + std::array group; + Iota(group.begin(), group.end(), -4); + Group g{group.data()}; + h2_t h = 1; + for (auto _ : state) { + ::benchmark::DoNotOptimize(h); + ::benchmark::DoNotOptimize(g); + ::benchmark::DoNotOptimize(g.Match(h)); + } +} +BENCHMARK(BM_Group_Match); + +void BM_Group_MatchEmpty(benchmark::State& state) { + std::array group; + Iota(group.begin(), group.end(), -4); + Group g{group.data()}; + for (auto _ : state) { + ::benchmark::DoNotOptimize(g); + ::benchmark::DoNotOptimize(g.MatchEmpty()); + } +} +BENCHMARK(BM_Group_MatchEmpty); + +void BM_Group_MatchEmptyOrDeleted(benchmark::State& state) { + std::array group; + Iota(group.begin(), group.end(), -4); + Group g{group.data()}; + for (auto _ : state) { + ::benchmark::DoNotOptimize(g); + ::benchmark::DoNotOptimize(g.MatchEmptyOrDeleted()); + } +} +BENCHMARK(BM_Group_MatchEmptyOrDeleted); + +void BM_Group_CountLeadingEmptyOrDeleted(benchmark::State& state) { + std::array group; + Iota(group.begin(), group.end(), -2); + Group g{group.data()}; + for (auto _ : state) { + ::benchmark::DoNotOptimize(g); + ::benchmark::DoNotOptimize(g.CountLeadingEmptyOrDeleted()); + } +} +BENCHMARK(BM_Group_CountLeadingEmptyOrDeleted); + +void BM_Group_MatchFirstEmptyOrDeleted(benchmark::State& state) { + std::array group; + Iota(group.begin(), group.end(), -2); + Group g{group.data()}; + for (auto _ : state) { + ::benchmark::DoNotOptimize(g); + ::benchmark::DoNotOptimize(*g.MatchEmptyOrDeleted()); + } +} +BENCHMARK(BM_Group_MatchFirstEmptyOrDeleted); + +void BM_DropDeletes(benchmark::State& state) { + constexpr size_t capacity = (1 << 20) - 1; + std::vector ctrl(capacity + 1 + Group::kWidth); + ctrl[capacity] = ctrl_t::kSentinel; + std::vector pattern = {ctrl_t::kEmpty, static_cast(2), + ctrl_t::kDeleted, static_cast(2), + ctrl_t::kEmpty, static_cast(1), + ctrl_t::kDeleted}; + for (size_t i = 0; i != capacity; ++i) { + ctrl[i] = pattern[i % pattern.size()]; + } + while (state.KeepRunning()) { + state.PauseTiming(); + std::vector ctrl_copy = ctrl; + state.ResumeTiming(); + ConvertDeletedToEmptyAndFullToDeleted(ctrl_copy.data(), capacity); + ::benchmark::DoNotOptimize(ctrl_copy[capacity]); + } +} +BENCHMARK(BM_DropDeletes); + +} // namespace +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +// These methods are here to make it easy to examine the assembly for targeted +// parts of the API. 
+auto CodegenAbslRawHashSetInt64Find(absl::container_internal::IntTable* table, + int64_t key) -> decltype(table->find(key)) { + return table->find(key); +} + +bool CodegenAbslRawHashSetInt64FindNeEnd( + absl::container_internal::IntTable* table, int64_t key) { + return table->find(key) != table->end(); +} + +auto CodegenAbslRawHashSetInt64Insert(absl::container_internal::IntTable* table, + int64_t key) + -> decltype(table->insert(key)) { + return table->insert(key); +} + +bool CodegenAbslRawHashSetInt64Contains( + absl::container_internal::IntTable* table, int64_t key) { + return table->contains(key); +} + +void CodegenAbslRawHashSetInt64Iterate( + absl::container_internal::IntTable* table) { + for (auto x : *table) benchmark::DoNotOptimize(x); +} + +int odr = + (::benchmark::DoNotOptimize(std::make_tuple( + &CodegenAbslRawHashSetInt64Find, &CodegenAbslRawHashSetInt64FindNeEnd, + &CodegenAbslRawHashSetInt64Insert, + &CodegenAbslRawHashSetInt64Contains, + &CodegenAbslRawHashSetInt64Iterate)), + 1); diff --git a/TMessagesProj/jni/voip/webrtc/absl/container/internal/raw_hash_set_probe_benchmark.cc b/TMessagesProj/jni/voip/webrtc/absl/container/internal/raw_hash_set_probe_benchmark.cc new file mode 100644 index 000000000..7169a2e20 --- /dev/null +++ b/TMessagesProj/jni/voip/webrtc/absl/container/internal/raw_hash_set_probe_benchmark.cc @@ -0,0 +1,590 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Generates probe length statistics for many combinations of key types and key +// distributions, all using the default hash function for swisstable. + +#include +#include // NOLINT +#include + +#include "absl/container/flat_hash_map.h" +#include "absl/container/internal/hash_function_defaults.h" +#include "absl/container/internal/hashtable_debug.h" +#include "absl/container/internal/raw_hash_set.h" +#include "absl/random/distributions.h" +#include "absl/random/random.h" +#include "absl/strings/str_cat.h" +#include "absl/strings/str_format.h" +#include "absl/strings/string_view.h" +#include "absl/strings/strip.h" + +namespace { + +enum class OutputStyle { kRegular, kBenchmark }; + +// The --benchmark command line flag. +// This is populated from main(). +// When run in "benchmark" mode, we have different output. This allows +// A/B comparisons with tools like `benchy`. +absl::string_view benchmarks; + +OutputStyle output() { + return !benchmarks.empty() ? 
OutputStyle::kBenchmark : OutputStyle::kRegular; +} + +template +struct Policy { + using slot_type = T; + using key_type = T; + using init_type = T; + + template + static void construct(allocator_type* alloc, slot_type* slot, + const Arg& arg) { + std::allocator_traits::construct(*alloc, slot, arg); + } + + template + static void destroy(allocator_type* alloc, slot_type* slot) { + std::allocator_traits::destroy(*alloc, slot); + } + + static slot_type& element(slot_type* slot) { return *slot; } + + template + static auto apply(F&& f, const slot_type& arg) + -> decltype(std::forward(f)(arg, arg)) { + return std::forward(f)(arg, arg); + } +}; + +absl::BitGen& GlobalBitGen() { + static auto* value = new absl::BitGen; + return *value; +} + +// Keeps a pool of allocations and randomly gives one out. +// This introduces more randomization to the addresses given to swisstable and +// should help smooth out this factor from probe length calculation. +template +class RandomizedAllocator { + public: + using value_type = T; + + RandomizedAllocator() = default; + template + RandomizedAllocator(RandomizedAllocator) {} // NOLINT + + static T* allocate(size_t n) { + auto& pointers = GetPointers(n); + // Fill the pool + while (pointers.size() < kRandomPool) { + pointers.push_back(std::allocator{}.allocate(n)); + } + + // Choose a random one. + size_t i = absl::Uniform(GlobalBitGen(), 0, pointers.size()); + T* result = pointers[i]; + pointers[i] = pointers.back(); + pointers.pop_back(); + return result; + } + + static void deallocate(T* p, size_t n) { + // Just put it back on the pool. No need to release the memory. + GetPointers(n).push_back(p); + } + + private: + // We keep at least kRandomPool allocations for each size. + static constexpr size_t kRandomPool = 20; + + static std::vector& GetPointers(size_t n) { + static auto* m = new absl::flat_hash_map>(); + return (*m)[n]; + } +}; + +template +struct DefaultHash { + using type = absl::container_internal::hash_default_hash; +}; + +template +using DefaultHashT = typename DefaultHash::type; + +template +struct Table : absl::container_internal::raw_hash_set< + Policy, DefaultHashT, + absl::container_internal::hash_default_eq, + RandomizedAllocator> {}; + +struct LoadSizes { + size_t min_load; + size_t max_load; +}; + +LoadSizes GetMinMaxLoadSizes() { + static const auto sizes = [] { + Table t; + + // First, fill enough to have a good distribution. + constexpr size_t kMinSize = 10000; + while (t.size() < kMinSize) t.insert(t.size()); + + const auto reach_min_load_factor = [&] { + const double lf = t.load_factor(); + while (lf <= t.load_factor()) t.insert(t.size()); + }; + + // Then, insert until we reach min load factor. + reach_min_load_factor(); + const size_t min_load_size = t.size(); + + // Keep going until we hit min load factor again, then go back one. + t.insert(t.size()); + reach_min_load_factor(); + + return LoadSizes{min_load_size, t.size() - 1}; + }(); + return sizes; +} + +struct Ratios { + double min_load; + double avg_load; + double max_load; +}; + +// See absl/container/internal/hashtable_debug.h for details on +// probe length calculation. 
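A hedged sketch of the probe-length instrumentation mentioned above: the hashtable_debug helpers can be pointed at a SwissTable-backed container to obtain a probe summary. MeanProbeLength is a hypothetical helper, and the assumption is that GetHashtableDebugProbeSummary accepts any raw_hash_set-backed container such as absl::flat_hash_set. CollectMeanProbeLengths, defined next, applies the same helper at several load levels.

    // Sketch only: report the mean probe length for a populated table.
    #include <cstdint>

    #include "absl/container/flat_hash_set.h"
    #include "absl/container/internal/hashtable_debug.h"

    double MeanProbeLength() {
      absl::flat_hash_set<int64_t> set;
      for (int64_t i = 0; i < 10000; ++i) set.insert(i);
      // .mean is the average number of probes needed to find the stored keys.
      return absl::container_internal::GetHashtableDebugProbeSummary(set).mean;
    }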
+template +Ratios CollectMeanProbeLengths() { + const auto min_max_sizes = GetMinMaxLoadSizes(); + + ElemFn elem; + using Key = decltype(elem()); + Table t; + + Ratios result; + while (t.size() < min_max_sizes.min_load) t.insert(elem()); + result.min_load = + absl::container_internal::GetHashtableDebugProbeSummary(t).mean; + + while (t.size() < (min_max_sizes.min_load + min_max_sizes.max_load) / 2) + t.insert(elem()); + result.avg_load = + absl::container_internal::GetHashtableDebugProbeSummary(t).mean; + + while (t.size() < min_max_sizes.max_load) t.insert(elem()); + result.max_load = + absl::container_internal::GetHashtableDebugProbeSummary(t).mean; + + return result; +} + +template +uintptr_t PointerForAlignment() { + alignas(Align) static constexpr uintptr_t kInitPointer = 0; + return reinterpret_cast(&kInitPointer); +} + +// This incomplete type is used for testing hash of pointers of different +// alignments. +// NOTE: We are generating invalid pointer values on the fly with +// reinterpret_cast. There are not "safely derived" pointers so using them is +// technically UB. It is unlikely to be a problem, though. +template +struct Ptr; + +template +Ptr* MakePtr(uintptr_t v) { + if (sizeof(v) == 8) { + constexpr int kCopyBits = 16; + // Ensure high bits are all the same. + v = static_cast(static_cast(v << kCopyBits) >> + kCopyBits); + } + return reinterpret_cast*>(v); +} + +struct IntIdentity { + uint64_t i; + friend bool operator==(IntIdentity a, IntIdentity b) { return a.i == b.i; } + IntIdentity operator++(int) { return IntIdentity{i++}; } +}; + +template +struct PtrIdentity { + explicit PtrIdentity(uintptr_t val = PointerForAlignment()) : i(val) {} + uintptr_t i; + friend bool operator==(PtrIdentity a, PtrIdentity b) { return a.i == b.i; } + PtrIdentity operator++(int) { + PtrIdentity p(i); + i += Align; + return p; + } +}; + +constexpr char kStringFormat[] = "/path/to/file/name-%07d-of-9999999.txt"; + +template +struct String { + std::string value; + static std::string Make(uint32_t v) { + return {small ? 
absl::StrCat(v) : absl::StrFormat(kStringFormat, v)}; + } +}; + +template <> +struct DefaultHash { + struct type { + size_t operator()(IntIdentity t) const { return t.i; } + }; +}; + +template +struct DefaultHash> { + struct type { + size_t operator()(PtrIdentity t) const { return t.i; } + }; +}; + +template +struct Sequential { + T operator()() const { return current++; } + mutable T current{}; +}; + +template +struct Sequential*> { + Ptr* operator()() const { + auto* result = MakePtr(current); + current += Align; + return result; + } + mutable uintptr_t current = PointerForAlignment(); +}; + + +template +struct Sequential> { + std::string operator()() const { return String::Make(current++); } + mutable uint32_t current = 0; +}; + +template +struct Sequential> { + mutable Sequential tseq; + mutable Sequential useq; + + using RealT = decltype(tseq()); + using RealU = decltype(useq()); + + mutable std::vector ts; + mutable std::vector us; + mutable size_t ti = 0, ui = 0; + + std::pair operator()() const { + std::pair value{get_t(), get_u()}; + if (ti == 0) { + ti = ui + 1; + ui = 0; + } else { + --ti; + ++ui; + } + return value; + } + + RealT get_t() const { + while (ti >= ts.size()) ts.push_back(tseq()); + return ts[ti]; + } + + RealU get_u() const { + while (ui >= us.size()) us.push_back(useq()); + return us[ui]; + } +}; + +template +struct AlmostSequential { + mutable Sequential current; + + auto operator()() const -> decltype(current()) { + while (absl::Uniform(GlobalBitGen(), 0.0, 1.0) <= percent_skip / 100.) + current(); + return current(); + } +}; + +struct Uniform { + template + T operator()(T) const { + return absl::Uniform(absl::IntervalClosed, GlobalBitGen(), T{0}, ~T{0}); + } +}; + +struct Gaussian { + template + T operator()(T) const { + double d; + do { + d = absl::Gaussian(GlobalBitGen(), 1e6, 1e4); + } while (d <= 0 || d > std::numeric_limits::max() / 2); + return static_cast(d); + } +}; + +struct Zipf { + template + T operator()(T) const { + return absl::Zipf(GlobalBitGen(), std::numeric_limits::max(), 1.6); + } +}; + +template +struct Random { + T operator()() const { return Dist{}(T{}); } +}; + +template +struct Random*, Dist> { + Ptr* operator()() const { + return MakePtr(Random{}() * Align); + } +}; + +template +struct Random { + IntIdentity operator()() const { + return IntIdentity{Random{}()}; + } +}; + +template +struct Random, Dist> { + PtrIdentity operator()() const { + return PtrIdentity{Random{}() * Align}; + } +}; + +template +struct Random, Dist> { + std::string operator()() const { + return String::Make(Random{}()); + } +}; + +template +struct Random, Dist> { + auto operator()() const + -> decltype(std::make_pair(Random{}(), Random{}())) { + return std::make_pair(Random{}(), Random{}()); + } +}; + +template +std::string Name(); + +std::string Name(uint32_t*) { return "u32"; } +std::string Name(uint64_t*) { return "u64"; } +std::string Name(IntIdentity*) { return "IntIdentity"; } + +template +std::string Name(Ptr**) { + return absl::StrCat("Ptr", Align); +} + +template +std::string Name(PtrIdentity*) { + return absl::StrCat("PtrIdentity", Align); +} + +template +std::string Name(String*) { + return small ? 
"StrS" : "StrL"; +} + +template +std::string Name(std::pair*) { + if (output() == OutputStyle::kBenchmark) + return absl::StrCat("P_", Name(), "_", Name()); + return absl::StrCat("P<", Name(), ",", Name(), ">"); +} + +template +std::string Name(Sequential*) { + return "Sequential"; +} + +template +std::string Name(AlmostSequential*) { + return absl::StrCat("AlmostSeq_", P); +} + +template +std::string Name(Random*) { + return "UnifRand"; +} + +template +std::string Name(Random*) { + return "GausRand"; +} + +template +std::string Name(Random*) { + return "ZipfRand"; +} + +template +std::string Name() { + return Name(static_cast(nullptr)); +} + +constexpr int kNameWidth = 15; +constexpr int kDistWidth = 16; + +bool CanRunBenchmark(absl::string_view name) { + static std::regex* const filter = []() -> std::regex* { + return benchmarks.empty() || benchmarks == "all" + ? nullptr + : new std::regex(std::string(benchmarks)); + }(); + return filter == nullptr || std::regex_search(std::string(name), *filter); +} + +struct Result { + std::string name; + std::string dist_name; + Ratios ratios; +}; + +template +void RunForTypeAndDistribution(std::vector& results) { + std::string name = absl::StrCat(Name(), "/", Name()); + // We have to check against all three names (min/avg/max) before we run it. + // If any of them is enabled, we run it. + if (!CanRunBenchmark(absl::StrCat(name, "/min")) && + !CanRunBenchmark(absl::StrCat(name, "/avg")) && + !CanRunBenchmark(absl::StrCat(name, "/max"))) { + return; + } + results.push_back({Name(), Name(), CollectMeanProbeLengths()}); +} + +template +void RunForType(std::vector& results) { + RunForTypeAndDistribution>(results); + RunForTypeAndDistribution>(results); + RunForTypeAndDistribution>(results); + RunForTypeAndDistribution>(results); +#ifdef NDEBUG + // Disable these in non-opt mode because they take too long. + RunForTypeAndDistribution>(results); + RunForTypeAndDistribution>(results); +#endif // NDEBUG +} + +} // namespace + +int main(int argc, char** argv) { + // Parse the benchmark flags. Ignore all of them except the regex pattern. + for (int i = 1; i < argc; ++i) { + absl::string_view arg = argv[i]; + const auto next = [&] { return argv[std::min(i + 1, argc - 1)]; }; + + if (absl::ConsumePrefix(&arg, "--benchmark_filter")) { + if (arg == "") { + // --benchmark_filter X + benchmarks = next(); + } else if (absl::ConsumePrefix(&arg, "=")) { + // --benchmark_filter=X + benchmarks = arg; + } + } + + // Any --benchmark flag turns on the mode. 
+ if (absl::ConsumePrefix(&arg, "--benchmark")) { + if (benchmarks.empty()) benchmarks="all"; + } + } + + std::vector results; + RunForType(results); + RunForType(results); + RunForType*>(results); + RunForType*>(results); + RunForType*>(results); + RunForType*>(results); + RunForType>(results); + RunForType>(results); + RunForType>(results); + RunForType>(results); + RunForType>(results); + RunForType>(results); + RunForType>(results); + RunForType>>(results); + RunForType, uint64_t>>(results); + RunForType>>(results); + RunForType, uint64_t>>(results); + + switch (output()) { + case OutputStyle::kRegular: + absl::PrintF("%-*s%-*s Min Avg Max\n%s\n", kNameWidth, + "Type", kDistWidth, "Distribution", + std::string(kNameWidth + kDistWidth + 10 * 3, '-')); + for (const auto& result : results) { + absl::PrintF("%-*s%-*s %8.4f %8.4f %8.4f\n", kNameWidth, result.name, + kDistWidth, result.dist_name, result.ratios.min_load, + result.ratios.avg_load, result.ratios.max_load); + } + break; + case OutputStyle::kBenchmark: { + absl::PrintF("{\n"); + absl::PrintF(" \"benchmarks\": [\n"); + absl::string_view comma; + for (const auto& result : results) { + auto print = [&](absl::string_view stat, double Ratios::*val) { + std::string name = + absl::StrCat(result.name, "/", result.dist_name, "/", stat); + // Check the regex again. We might had have enabled only one of the + // stats for the benchmark. + if (!CanRunBenchmark(name)) return; + absl::PrintF(" %s{\n", comma); + absl::PrintF(" \"cpu_time\": %f,\n", 1e9 * result.ratios.*val); + absl::PrintF(" \"real_time\": %f,\n", 1e9 * result.ratios.*val); + absl::PrintF(" \"iterations\": 1,\n"); + absl::PrintF(" \"name\": \"%s\",\n", name); + absl::PrintF(" \"time_unit\": \"ns\"\n"); + absl::PrintF(" }\n"); + comma = ","; + }; + print("min", &Ratios::min_load); + print("avg", &Ratios::avg_load); + print("max", &Ratios::max_load); + } + absl::PrintF(" ],\n"); + absl::PrintF(" \"context\": {\n"); + absl::PrintF(" }\n"); + absl::PrintF("}\n"); + break; + } + } + + return 0; +} diff --git a/TMessagesProj/jni/voip/webrtc/absl/container/internal/raw_hash_set_test.cc b/TMessagesProj/jni/voip/webrtc/absl/container/internal/raw_hash_set_test.cc new file mode 100644 index 000000000..e7732f671 --- /dev/null +++ b/TMessagesProj/jni/voip/webrtc/absl/container/internal/raw_hash_set_test.cc @@ -0,0 +1,2182 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
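Before the unit tests below, a worked example of the kBenchmark output emitted by the probe benchmark's main() above (illustrative only: the numeric values are made up, and kExampleProbeBenchmarkEntry is a hypothetical constant used purely to show the shape of one emitted entry).

    // One entry of the pseudo-benchmark JSON printed in kBenchmark mode, so that
    // tools expecting Google Benchmark output can do A/B comparisons.
    constexpr char kExampleProbeBenchmarkEntry[] = R"json(
      {
        "cpu_time": 1234567.000000,
        "real_time": 1234567.000000,
        "iterations": 1,
        "name": "u64/UnifRand/avg",
        "time_unit": "ns"
      }
    )json";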
+ +#include "absl/container/internal/raw_hash_set.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#include "absl/base/internal/cycleclock.h" +#include "absl/base/internal/raw_logging.h" +#include "absl/container/internal/container_memory.h" +#include "absl/container/internal/hash_function_defaults.h" +#include "absl/container/internal/hash_policy_testing.h" +#include "absl/container/internal/hashtable_debug.h" +#include "absl/strings/string_view.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +struct RawHashSetTestOnlyAccess { + template + static auto GetSlots(const C& c) -> decltype(c.slots_) { + return c.slots_; + } +}; + +namespace { + +using ::testing::ElementsAre; +using ::testing::Eq; +using ::testing::Ge; +using ::testing::Lt; +using ::testing::Pair; +using ::testing::UnorderedElementsAre; + +// Convenience function to static cast to ctrl_t. +ctrl_t CtrlT(int i) { return static_cast(i); } + +TEST(Util, NormalizeCapacity) { + EXPECT_EQ(1, NormalizeCapacity(0)); + EXPECT_EQ(1, NormalizeCapacity(1)); + EXPECT_EQ(3, NormalizeCapacity(2)); + EXPECT_EQ(3, NormalizeCapacity(3)); + EXPECT_EQ(7, NormalizeCapacity(4)); + EXPECT_EQ(7, NormalizeCapacity(7)); + EXPECT_EQ(15, NormalizeCapacity(8)); + EXPECT_EQ(15, NormalizeCapacity(15)); + EXPECT_EQ(15 * 2 + 1, NormalizeCapacity(15 + 1)); + EXPECT_EQ(15 * 2 + 1, NormalizeCapacity(15 + 2)); +} + +TEST(Util, GrowthAndCapacity) { + // Verify that GrowthToCapacity gives the minimum capacity that has enough + // growth. + for (size_t growth = 0; growth < 10000; ++growth) { + SCOPED_TRACE(growth); + size_t capacity = NormalizeCapacity(GrowthToLowerboundCapacity(growth)); + // The capacity is large enough for `growth`. + EXPECT_THAT(CapacityToGrowth(capacity), Ge(growth)); + // For (capacity+1) < kWidth, growth should equal capacity. + if (capacity + 1 < Group::kWidth) { + EXPECT_THAT(CapacityToGrowth(capacity), Eq(capacity)); + } else { + EXPECT_THAT(CapacityToGrowth(capacity), Lt(capacity)); + } + if (growth != 0 && capacity > 1) { + // There is no smaller capacity that works. 
+ EXPECT_THAT(CapacityToGrowth(capacity / 2), Lt(growth)); + } + } + + for (size_t capacity = Group::kWidth - 1; capacity < 10000; + capacity = 2 * capacity + 1) { + SCOPED_TRACE(capacity); + size_t growth = CapacityToGrowth(capacity); + EXPECT_THAT(growth, Lt(capacity)); + EXPECT_LE(GrowthToLowerboundCapacity(growth), capacity); + EXPECT_EQ(NormalizeCapacity(GrowthToLowerboundCapacity(growth)), capacity); + } +} + +TEST(Util, probe_seq) { + probe_seq<16> seq(0, 127); + auto gen = [&]() { + size_t res = seq.offset(); + seq.next(); + return res; + }; + std::vector offsets(8); + std::generate_n(offsets.begin(), 8, gen); + EXPECT_THAT(offsets, ElementsAre(0, 16, 48, 96, 32, 112, 80, 64)); + seq = probe_seq<16>(128, 127); + std::generate_n(offsets.begin(), 8, gen); + EXPECT_THAT(offsets, ElementsAre(0, 16, 48, 96, 32, 112, 80, 64)); +} + +TEST(BitMask, Smoke) { + EXPECT_FALSE((BitMask(0))); + EXPECT_TRUE((BitMask(5))); + + EXPECT_THAT((BitMask(0)), ElementsAre()); + EXPECT_THAT((BitMask(0x1)), ElementsAre(0)); + EXPECT_THAT((BitMask(0x2)), ElementsAre(1)); + EXPECT_THAT((BitMask(0x3)), ElementsAre(0, 1)); + EXPECT_THAT((BitMask(0x4)), ElementsAre(2)); + EXPECT_THAT((BitMask(0x5)), ElementsAre(0, 2)); + EXPECT_THAT((BitMask(0x55)), ElementsAre(0, 2, 4, 6)); + EXPECT_THAT((BitMask(0xAA)), ElementsAre(1, 3, 5, 7)); +} + +TEST(BitMask, WithShift) { + // See the non-SSE version of Group for details on what this math is for. + uint64_t ctrl = 0x1716151413121110; + uint64_t hash = 0x12; + constexpr uint64_t msbs = 0x8080808080808080ULL; + constexpr uint64_t lsbs = 0x0101010101010101ULL; + auto x = ctrl ^ (lsbs * hash); + uint64_t mask = (x - lsbs) & ~x & msbs; + EXPECT_EQ(0x0000000080800000, mask); + + BitMask b(mask); + EXPECT_EQ(*b, 2); +} + +TEST(BitMask, LeadingTrailing) { + EXPECT_EQ((BitMask(0x00001a40).LeadingZeros()), 3); + EXPECT_EQ((BitMask(0x00001a40).TrailingZeros()), 6); + + EXPECT_EQ((BitMask(0x00000001).LeadingZeros()), 15); + EXPECT_EQ((BitMask(0x00000001).TrailingZeros()), 0); + + EXPECT_EQ((BitMask(0x00008000).LeadingZeros()), 0); + EXPECT_EQ((BitMask(0x00008000).TrailingZeros()), 15); + + EXPECT_EQ((BitMask(0x0000008080808000).LeadingZeros()), 3); + EXPECT_EQ((BitMask(0x0000008080808000).TrailingZeros()), 1); + + EXPECT_EQ((BitMask(0x0000000000000080).LeadingZeros()), 7); + EXPECT_EQ((BitMask(0x0000000000000080).TrailingZeros()), 0); + + EXPECT_EQ((BitMask(0x8000000000000000).LeadingZeros()), 0); + EXPECT_EQ((BitMask(0x8000000000000000).TrailingZeros()), 7); +} + +TEST(Group, EmptyGroup) { + for (h2_t h = 0; h != 128; ++h) EXPECT_FALSE(Group{EmptyGroup()}.Match(h)); +} + +TEST(Group, Match) { + if (Group::kWidth == 16) { + ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted, CtrlT(3), + ctrl_t::kEmpty, CtrlT(5), ctrl_t::kSentinel, CtrlT(7), + CtrlT(7), CtrlT(5), CtrlT(3), CtrlT(1), + CtrlT(1), CtrlT(1), CtrlT(1), CtrlT(1)}; + EXPECT_THAT(Group{group}.Match(0), ElementsAre()); + EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 11, 12, 13, 14, 15)); + EXPECT_THAT(Group{group}.Match(3), ElementsAre(3, 10)); + EXPECT_THAT(Group{group}.Match(5), ElementsAre(5, 9)); + EXPECT_THAT(Group{group}.Match(7), ElementsAre(7, 8)); + } else if (Group::kWidth == 8) { + ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), CtrlT(2), + ctrl_t::kDeleted, CtrlT(2), CtrlT(1), + ctrl_t::kSentinel, CtrlT(1)}; + EXPECT_THAT(Group{group}.Match(0), ElementsAre()); + EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 5, 7)); + EXPECT_THAT(Group{group}.Match(2), ElementsAre(2, 4)); + } else { + FAIL() << "No 
test coverage for Group::kWidth==" << Group::kWidth; + } +} + +TEST(Group, MatchEmpty) { + if (Group::kWidth == 16) { + ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted, CtrlT(3), + ctrl_t::kEmpty, CtrlT(5), ctrl_t::kSentinel, CtrlT(7), + CtrlT(7), CtrlT(5), CtrlT(3), CtrlT(1), + CtrlT(1), CtrlT(1), CtrlT(1), CtrlT(1)}; + EXPECT_THAT(Group{group}.MatchEmpty(), ElementsAre(0, 4)); + } else if (Group::kWidth == 8) { + ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), CtrlT(2), + ctrl_t::kDeleted, CtrlT(2), CtrlT(1), + ctrl_t::kSentinel, CtrlT(1)}; + EXPECT_THAT(Group{group}.MatchEmpty(), ElementsAre(0)); + } else { + FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth; + } +} + +TEST(Group, MatchEmptyOrDeleted) { + if (Group::kWidth == 16) { + ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted, CtrlT(3), + ctrl_t::kEmpty, CtrlT(5), ctrl_t::kSentinel, CtrlT(7), + CtrlT(7), CtrlT(5), CtrlT(3), CtrlT(1), + CtrlT(1), CtrlT(1), CtrlT(1), CtrlT(1)}; + EXPECT_THAT(Group{group}.MatchEmptyOrDeleted(), ElementsAre(0, 2, 4)); + } else if (Group::kWidth == 8) { + ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), CtrlT(2), + ctrl_t::kDeleted, CtrlT(2), CtrlT(1), + ctrl_t::kSentinel, CtrlT(1)}; + EXPECT_THAT(Group{group}.MatchEmptyOrDeleted(), ElementsAre(0, 3)); + } else { + FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth; + } +} + +TEST(Batch, DropDeletes) { + constexpr size_t kCapacity = 63; + constexpr size_t kGroupWidth = container_internal::Group::kWidth; + std::vector ctrl(kCapacity + 1 + kGroupWidth); + ctrl[kCapacity] = ctrl_t::kSentinel; + std::vector pattern = { + ctrl_t::kEmpty, CtrlT(2), ctrl_t::kDeleted, CtrlT(2), + ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted}; + for (size_t i = 0; i != kCapacity; ++i) { + ctrl[i] = pattern[i % pattern.size()]; + if (i < kGroupWidth - 1) + ctrl[i + kCapacity + 1] = pattern[i % pattern.size()]; + } + ConvertDeletedToEmptyAndFullToDeleted(ctrl.data(), kCapacity); + ASSERT_EQ(ctrl[kCapacity], ctrl_t::kSentinel); + for (size_t i = 0; i < kCapacity + kGroupWidth; ++i) { + ctrl_t expected = pattern[i % (kCapacity + 1) % pattern.size()]; + if (i == kCapacity) expected = ctrl_t::kSentinel; + if (expected == ctrl_t::kDeleted) expected = ctrl_t::kEmpty; + if (IsFull(expected)) expected = ctrl_t::kDeleted; + EXPECT_EQ(ctrl[i], expected) + << i << " " << static_cast(pattern[i % pattern.size()]); + } +} + +TEST(Group, CountLeadingEmptyOrDeleted) { + const std::vector empty_examples = {ctrl_t::kEmpty, ctrl_t::kDeleted}; + const std::vector full_examples = { + CtrlT(0), CtrlT(1), CtrlT(2), CtrlT(3), + CtrlT(5), CtrlT(9), CtrlT(127), ctrl_t::kSentinel}; + + for (ctrl_t empty : empty_examples) { + std::vector e(Group::kWidth, empty); + EXPECT_EQ(Group::kWidth, Group{e.data()}.CountLeadingEmptyOrDeleted()); + for (ctrl_t full : full_examples) { + for (size_t i = 0; i != Group::kWidth; ++i) { + std::vector f(Group::kWidth, empty); + f[i] = full; + EXPECT_EQ(i, Group{f.data()}.CountLeadingEmptyOrDeleted()); + } + std::vector f(Group::kWidth, empty); + f[Group::kWidth * 2 / 3] = full; + f[Group::kWidth / 2] = full; + EXPECT_EQ( + Group::kWidth / 2, Group{f.data()}.CountLeadingEmptyOrDeleted()); + } + } +} + +template +struct ValuePolicy { + using slot_type = T; + using key_type = T; + using init_type = T; + + template + static void construct(Allocator* alloc, slot_type* slot, Args&&... 
args) { + absl::allocator_traits::construct(*alloc, slot, + std::forward(args)...); + } + + template + static void destroy(Allocator* alloc, slot_type* slot) { + absl::allocator_traits::destroy(*alloc, slot); + } + + template + static void transfer(Allocator* alloc, slot_type* new_slot, + slot_type* old_slot) { + construct(alloc, new_slot, std::move(*old_slot)); + destroy(alloc, old_slot); + } + + static T& element(slot_type* slot) { return *slot; } + + template + static decltype(absl::container_internal::DecomposeValue( + std::declval(), std::declval()...)) + apply(F&& f, Args&&... args) { + return absl::container_internal::DecomposeValue( + std::forward(f), std::forward(args)...); + } +}; + +using IntPolicy = ValuePolicy; +using Uint8Policy = ValuePolicy; + +class StringPolicy { + template ::value>::type> + decltype(std::declval()( + std::declval(), std::piecewise_construct, + std::declval>(), + std::declval())) static apply_impl(F&& f, + std::pair, V> p) { + const absl::string_view& key = std::get<0>(p.first); + return std::forward(f)(key, std::piecewise_construct, std::move(p.first), + std::move(p.second)); + } + + public: + struct slot_type { + struct ctor {}; + + template + slot_type(ctor, Ts&&... ts) : pair(std::forward(ts)...) {} + + std::pair pair; + }; + + using key_type = std::string; + using init_type = std::pair; + + template + static void construct(allocator_type* alloc, slot_type* slot, Args... args) { + std::allocator_traits::construct( + *alloc, slot, typename slot_type::ctor(), std::forward(args)...); + } + + template + static void destroy(allocator_type* alloc, slot_type* slot) { + std::allocator_traits::destroy(*alloc, slot); + } + + template + static void transfer(allocator_type* alloc, slot_type* new_slot, + slot_type* old_slot) { + construct(alloc, new_slot, std::move(old_slot->pair)); + destroy(alloc, old_slot); + } + + static std::pair& element(slot_type* slot) { + return slot->pair; + } + + template + static auto apply(F&& f, Args&&... 
args) + -> decltype(apply_impl(std::forward(f), + PairArgs(std::forward(args)...))) { + return apply_impl(std::forward(f), + PairArgs(std::forward(args)...)); + } +}; + +struct StringHash : absl::Hash { + using is_transparent = void; +}; +struct StringEq : std::equal_to { + using is_transparent = void; +}; + +struct StringTable + : raw_hash_set> { + using Base = typename StringTable::raw_hash_set; + StringTable() {} + using Base::Base; +}; + +struct IntTable + : raw_hash_set, + std::equal_to, std::allocator> { + using Base = typename IntTable::raw_hash_set; + using Base::Base; +}; + +struct Uint8Table + : raw_hash_set, + std::equal_to, std::allocator> { + using Base = typename Uint8Table::raw_hash_set; + using Base::Base; +}; + +template +struct CustomAlloc : std::allocator { + CustomAlloc() {} + + template + CustomAlloc(const CustomAlloc& other) {} + + template struct rebind { + using other = CustomAlloc; + }; +}; + +struct CustomAllocIntTable + : raw_hash_set, + std::equal_to, CustomAlloc> { + using Base = typename CustomAllocIntTable::raw_hash_set; + using Base::Base; +}; + +struct BadFastHash { + template + size_t operator()(const T&) const { + return 0; + } +}; + +struct BadTable : raw_hash_set, + std::allocator> { + using Base = typename BadTable::raw_hash_set; + BadTable() {} + using Base::Base; +}; + +TEST(Table, EmptyFunctorOptimization) { + static_assert(std::is_empty>::value, ""); + static_assert(std::is_empty>::value, ""); + + struct MockTable { + void* ctrl; + void* slots; + size_t size; + size_t capacity; + size_t growth_left; + void* infoz; + }; + struct MockTableInfozDisabled { + void* ctrl; + void* slots; + size_t size; + size_t capacity; + size_t growth_left; + }; + struct StatelessHash { + size_t operator()(absl::string_view) const { return 0; } + }; + struct StatefulHash : StatelessHash { + size_t dummy; + }; + + if (std::is_empty::value) { + EXPECT_EQ(sizeof(MockTableInfozDisabled), + sizeof(raw_hash_set, + std::allocator>)); + + EXPECT_EQ(sizeof(MockTableInfozDisabled) + sizeof(StatefulHash), + sizeof(raw_hash_set, + std::allocator>)); + } else { + EXPECT_EQ(sizeof(MockTable), + sizeof(raw_hash_set, + std::allocator>)); + + EXPECT_EQ(sizeof(MockTable) + sizeof(StatefulHash), + sizeof(raw_hash_set, + std::allocator>)); + } +} + +TEST(Table, Empty) { + IntTable t; + EXPECT_EQ(0, t.size()); + EXPECT_TRUE(t.empty()); +} + +TEST(Table, LookupEmpty) { + IntTable t; + auto it = t.find(0); + EXPECT_TRUE(it == t.end()); +} + +TEST(Table, Insert1) { + IntTable t; + EXPECT_TRUE(t.find(0) == t.end()); + auto res = t.emplace(0); + EXPECT_TRUE(res.second); + EXPECT_THAT(*res.first, 0); + EXPECT_EQ(1, t.size()); + EXPECT_THAT(*t.find(0), 0); +} + +TEST(Table, Insert2) { + IntTable t; + EXPECT_TRUE(t.find(0) == t.end()); + auto res = t.emplace(0); + EXPECT_TRUE(res.second); + EXPECT_THAT(*res.first, 0); + EXPECT_EQ(1, t.size()); + EXPECT_TRUE(t.find(1) == t.end()); + res = t.emplace(1); + EXPECT_TRUE(res.second); + EXPECT_THAT(*res.first, 1); + EXPECT_EQ(2, t.size()); + EXPECT_THAT(*t.find(0), 0); + EXPECT_THAT(*t.find(1), 1); +} + +TEST(Table, InsertCollision) { + BadTable t; + EXPECT_TRUE(t.find(1) == t.end()); + auto res = t.emplace(1); + EXPECT_TRUE(res.second); + EXPECT_THAT(*res.first, 1); + EXPECT_EQ(1, t.size()); + + EXPECT_TRUE(t.find(2) == t.end()); + res = t.emplace(2); + EXPECT_THAT(*res.first, 2); + EXPECT_TRUE(res.second); + EXPECT_EQ(2, t.size()); + + EXPECT_THAT(*t.find(1), 1); + EXPECT_THAT(*t.find(2), 2); +} + +// Test that we do not add existent element in case we 
need to search through +// many groups with deleted elements +TEST(Table, InsertCollisionAndFindAfterDelete) { + BadTable t; // all elements go to the same group. + // Have at least 2 groups with Group::kWidth collisions + // plus some extra collisions in the last group. + constexpr size_t kNumInserts = Group::kWidth * 2 + 5; + for (size_t i = 0; i < kNumInserts; ++i) { + auto res = t.emplace(i); + EXPECT_TRUE(res.second); + EXPECT_THAT(*res.first, i); + EXPECT_EQ(i + 1, t.size()); + } + + // Remove elements one by one and check + // that we still can find all other elements. + for (size_t i = 0; i < kNumInserts; ++i) { + EXPECT_EQ(1, t.erase(i)) << i; + for (size_t j = i + 1; j < kNumInserts; ++j) { + EXPECT_THAT(*t.find(j), j); + auto res = t.emplace(j); + EXPECT_FALSE(res.second) << i << " " << j; + EXPECT_THAT(*res.first, j); + EXPECT_EQ(kNumInserts - i - 1, t.size()); + } + } + EXPECT_TRUE(t.empty()); +} + +TEST(Table, InsertWithinCapacity) { + IntTable t; + t.reserve(10); + const size_t original_capacity = t.capacity(); + const auto addr = [&](int i) { + return reinterpret_cast(&*t.find(i)); + }; + // Inserting an element does not change capacity. + t.insert(0); + EXPECT_THAT(t.capacity(), original_capacity); + const uintptr_t original_addr_0 = addr(0); + // Inserting another element does not rehash. + t.insert(1); + EXPECT_THAT(t.capacity(), original_capacity); + EXPECT_THAT(addr(0), original_addr_0); + // Inserting lots of duplicate elements does not rehash. + for (int i = 0; i < 100; ++i) { + t.insert(i % 10); + } + EXPECT_THAT(t.capacity(), original_capacity); + EXPECT_THAT(addr(0), original_addr_0); + // Inserting a range of duplicate elements does not rehash. + std::vector dup_range; + for (int i = 0; i < 100; ++i) { + dup_range.push_back(i % 10); + } + t.insert(dup_range.begin(), dup_range.end()); + EXPECT_THAT(t.capacity(), original_capacity); + EXPECT_THAT(addr(0), original_addr_0); +} + +TEST(Table, LazyEmplace) { + StringTable t; + bool called = false; + auto it = t.lazy_emplace("abc", [&](const StringTable::constructor& f) { + called = true; + f("abc", "ABC"); + }); + EXPECT_TRUE(called); + EXPECT_THAT(*it, Pair("abc", "ABC")); + called = false; + it = t.lazy_emplace("abc", [&](const StringTable::constructor& f) { + called = true; + f("abc", "DEF"); + }); + EXPECT_FALSE(called); + EXPECT_THAT(*it, Pair("abc", "ABC")); +} + +TEST(Table, ContainsEmpty) { + IntTable t; + + EXPECT_FALSE(t.contains(0)); +} + +TEST(Table, Contains1) { + IntTable t; + + EXPECT_TRUE(t.insert(0).second); + EXPECT_TRUE(t.contains(0)); + EXPECT_FALSE(t.contains(1)); + + EXPECT_EQ(1, t.erase(0)); + EXPECT_FALSE(t.contains(0)); +} + +TEST(Table, Contains2) { + IntTable t; + + EXPECT_TRUE(t.insert(0).second); + EXPECT_TRUE(t.contains(0)); + EXPECT_FALSE(t.contains(1)); + + t.clear(); + EXPECT_FALSE(t.contains(0)); +} + +int decompose_constructed; +int decompose_copy_constructed; +int decompose_copy_assigned; +int decompose_move_constructed; +int decompose_move_assigned; +struct DecomposeType { + DecomposeType(int i = 0) : i(i) { // NOLINT + ++decompose_constructed; + } + + explicit DecomposeType(const char* d) : DecomposeType(*d) {} + + DecomposeType(const DecomposeType& other) : i(other.i) { + ++decompose_copy_constructed; + } + DecomposeType& operator=(const DecomposeType& other) { + ++decompose_copy_assigned; + i = other.i; + return *this; + } + DecomposeType(DecomposeType&& other) : i(other.i) { + ++decompose_move_constructed; + } + DecomposeType& operator=(DecomposeType&& other) { + 
++decompose_move_assigned; + i = other.i; + return *this; + } + + int i; +}; + +struct DecomposeHash { + using is_transparent = void; + size_t operator()(const DecomposeType& a) const { return a.i; } + size_t operator()(int a) const { return a; } + size_t operator()(const char* a) const { return *a; } +}; + +struct DecomposeEq { + using is_transparent = void; + bool operator()(const DecomposeType& a, const DecomposeType& b) const { + return a.i == b.i; + } + bool operator()(const DecomposeType& a, int b) const { return a.i == b; } + bool operator()(const DecomposeType& a, const char* b) const { + return a.i == *b; + } +}; + +struct DecomposePolicy { + using slot_type = DecomposeType; + using key_type = DecomposeType; + using init_type = DecomposeType; + + template + static void construct(void*, DecomposeType* slot, T&& v) { + ::new (slot) DecomposeType(std::forward(v)); + } + static void destroy(void*, DecomposeType* slot) { slot->~DecomposeType(); } + static DecomposeType& element(slot_type* slot) { return *slot; } + + template + static auto apply(F&& f, const T& x) -> decltype(std::forward(f)(x, x)) { + return std::forward(f)(x, x); + } +}; + +template +void TestDecompose(bool construct_three) { + DecomposeType elem{0}; + const int one = 1; + const char* three_p = "3"; + const auto& three = three_p; + const int elem_vector_count = 256; + std::vector elem_vector(elem_vector_count, DecomposeType{0}); + std::iota(elem_vector.begin(), elem_vector.end(), 0); + + using DecomposeSet = + raw_hash_set>; + DecomposeSet set1; + + decompose_constructed = 0; + int expected_constructed = 0; + EXPECT_EQ(expected_constructed, decompose_constructed); + set1.insert(elem); + EXPECT_EQ(expected_constructed, decompose_constructed); + set1.insert(1); + EXPECT_EQ(++expected_constructed, decompose_constructed); + set1.emplace("3"); + EXPECT_EQ(++expected_constructed, decompose_constructed); + EXPECT_EQ(expected_constructed, decompose_constructed); + + { // insert(T&&) + set1.insert(1); + EXPECT_EQ(expected_constructed, decompose_constructed); + } + + { // insert(const T&) + set1.insert(one); + EXPECT_EQ(expected_constructed, decompose_constructed); + } + + { // insert(hint, T&&) + set1.insert(set1.begin(), 1); + EXPECT_EQ(expected_constructed, decompose_constructed); + } + + { // insert(hint, const T&) + set1.insert(set1.begin(), one); + EXPECT_EQ(expected_constructed, decompose_constructed); + } + + { // emplace(...) + set1.emplace(1); + EXPECT_EQ(expected_constructed, decompose_constructed); + set1.emplace("3"); + expected_constructed += construct_three; + EXPECT_EQ(expected_constructed, decompose_constructed); + set1.emplace(one); + EXPECT_EQ(expected_constructed, decompose_constructed); + set1.emplace(three); + expected_constructed += construct_three; + EXPECT_EQ(expected_constructed, decompose_constructed); + } + + { // emplace_hint(...) 
+ set1.emplace_hint(set1.begin(), 1); + EXPECT_EQ(expected_constructed, decompose_constructed); + set1.emplace_hint(set1.begin(), "3"); + expected_constructed += construct_three; + EXPECT_EQ(expected_constructed, decompose_constructed); + set1.emplace_hint(set1.begin(), one); + EXPECT_EQ(expected_constructed, decompose_constructed); + set1.emplace_hint(set1.begin(), three); + expected_constructed += construct_three; + EXPECT_EQ(expected_constructed, decompose_constructed); + } + + decompose_copy_constructed = 0; + decompose_copy_assigned = 0; + decompose_move_constructed = 0; + decompose_move_assigned = 0; + int expected_copy_constructed = 0; + int expected_move_constructed = 0; + { // raw_hash_set(first, last) with random-access iterators + DecomposeSet set2(elem_vector.begin(), elem_vector.end()); + // Expect exactly one copy-constructor call for each element if no + // rehashing is done. + expected_copy_constructed += elem_vector_count; + EXPECT_EQ(expected_copy_constructed, decompose_copy_constructed); + EXPECT_EQ(expected_move_constructed, decompose_move_constructed); + EXPECT_EQ(0, decompose_move_assigned); + EXPECT_EQ(0, decompose_copy_assigned); + } + + { // raw_hash_set(first, last) with forward iterators + std::list elem_list(elem_vector.begin(), elem_vector.end()); + expected_copy_constructed = decompose_copy_constructed; + DecomposeSet set2(elem_list.begin(), elem_list.end()); + // Expect exactly N elements copied into set, expect at most 2*N elements + // moving internally for all resizing needed (for a growth factor of 2). + expected_copy_constructed += elem_vector_count; + EXPECT_EQ(expected_copy_constructed, decompose_copy_constructed); + expected_move_constructed += elem_vector_count; + EXPECT_LT(expected_move_constructed, decompose_move_constructed); + expected_move_constructed += elem_vector_count; + EXPECT_GE(expected_move_constructed, decompose_move_constructed); + EXPECT_EQ(0, decompose_move_assigned); + EXPECT_EQ(0, decompose_copy_assigned); + expected_copy_constructed = decompose_copy_constructed; + expected_move_constructed = decompose_move_constructed; + } + + { // insert(first, last) + DecomposeSet set2; + set2.insert(elem_vector.begin(), elem_vector.end()); + // Expect exactly N elements copied into set, expect at most 2*N elements + // moving internally for all resizing needed (for a growth factor of 2). 
+ const int expected_new_elements = elem_vector_count; + const int expected_max_element_moves = 2 * elem_vector_count; + expected_copy_constructed += expected_new_elements; + EXPECT_EQ(expected_copy_constructed, decompose_copy_constructed); + expected_move_constructed += expected_max_element_moves; + EXPECT_GE(expected_move_constructed, decompose_move_constructed); + EXPECT_EQ(0, decompose_move_assigned); + EXPECT_EQ(0, decompose_copy_assigned); + expected_copy_constructed = decompose_copy_constructed; + expected_move_constructed = decompose_move_constructed; + } +} + +TEST(Table, Decompose) { + TestDecompose(false); + + struct TransparentHashIntOverload { + size_t operator()(const DecomposeType& a) const { return a.i; } + size_t operator()(int a) const { return a; } + }; + struct TransparentEqIntOverload { + bool operator()(const DecomposeType& a, const DecomposeType& b) const { + return a.i == b.i; + } + bool operator()(const DecomposeType& a, int b) const { return a.i == b; } + }; + TestDecompose(true); + TestDecompose(true); + TestDecompose(true); +} + +// Returns the largest m such that a table with m elements has the same number +// of buckets as a table with n elements. +size_t MaxDensitySize(size_t n) { + IntTable t; + t.reserve(n); + for (size_t i = 0; i != n; ++i) t.emplace(i); + const size_t c = t.bucket_count(); + while (c == t.bucket_count()) t.emplace(n++); + return t.size() - 1; +} + +struct Modulo1000Hash { + size_t operator()(int x) const { return x % 1000; } +}; + +struct Modulo1000HashTable + : public raw_hash_set, + std::allocator> {}; + +// Test that rehash with no resize happen in case of many deleted slots. +TEST(Table, RehashWithNoResize) { + Modulo1000HashTable t; + // Adding the same length (and the same hash) strings + // to have at least kMinFullGroups groups + // with Group::kWidth collisions. Then fill up to MaxDensitySize; + const size_t kMinFullGroups = 7; + std::vector keys; + for (size_t i = 0; i < MaxDensitySize(Group::kWidth * kMinFullGroups); ++i) { + int k = i * 1000; + t.emplace(k); + keys.push_back(k); + } + const size_t capacity = t.capacity(); + + // Remove elements from all groups except the first and the last one. + // All elements removed from full groups will be marked as ctrl_t::kDeleted. + const size_t erase_begin = Group::kWidth / 2; + const size_t erase_end = (t.size() / Group::kWidth - 1) * Group::kWidth; + for (size_t i = erase_begin; i < erase_end; ++i) { + EXPECT_EQ(1, t.erase(keys[i])) << i; + } + keys.erase(keys.begin() + erase_begin, keys.begin() + erase_end); + + auto last_key = keys.back(); + size_t last_key_num_probes = GetHashtableDebugNumProbes(t, last_key); + + // Make sure that we have to make a lot of probes for last key. + ASSERT_GT(last_key_num_probes, kMinFullGroups); + + int x = 1; + // Insert and erase one element, before inplace rehash happen. + while (last_key_num_probes == GetHashtableDebugNumProbes(t, last_key)) { + t.emplace(x); + ASSERT_EQ(capacity, t.capacity()); + // All elements should be there. 
+ ASSERT_TRUE(t.find(x) != t.end()) << x; + for (const auto& k : keys) { + ASSERT_TRUE(t.find(k) != t.end()) << k; + } + t.erase(x); + ++x; + } +} + +TEST(Table, InsertEraseStressTest) { + IntTable t; + const size_t kMinElementCount = 250; + std::deque keys; + size_t i = 0; + for (; i < MaxDensitySize(kMinElementCount); ++i) { + t.emplace(i); + keys.push_back(i); + } + const size_t kNumIterations = 1000000; + for (; i < kNumIterations; ++i) { + ASSERT_EQ(1, t.erase(keys.front())); + keys.pop_front(); + t.emplace(i); + keys.push_back(i); + } +} + +TEST(Table, InsertOverloads) { + StringTable t; + // These should all trigger the insert(init_type) overload. + t.insert({{}, {}}); + t.insert({"ABC", {}}); + t.insert({"DEF", "!!!"}); + + EXPECT_THAT(t, UnorderedElementsAre(Pair("", ""), Pair("ABC", ""), + Pair("DEF", "!!!"))); +} + +TEST(Table, LargeTable) { + IntTable t; + for (int64_t i = 0; i != 100000; ++i) t.emplace(i << 40); + for (int64_t i = 0; i != 100000; ++i) ASSERT_EQ(i << 40, *t.find(i << 40)); +} + +// Timeout if copy is quadratic as it was in Rust. +TEST(Table, EnsureNonQuadraticAsInRust) { + static const size_t kLargeSize = 1 << 15; + + IntTable t; + for (size_t i = 0; i != kLargeSize; ++i) { + t.insert(i); + } + + // If this is quadratic, the test will timeout. + IntTable t2; + for (const auto& entry : t) t2.insert(entry); +} + +TEST(Table, ClearBug) { + IntTable t; + constexpr size_t capacity = container_internal::Group::kWidth - 1; + constexpr size_t max_size = capacity / 2 + 1; + for (size_t i = 0; i < max_size; ++i) { + t.insert(i); + } + ASSERT_EQ(capacity, t.capacity()); + intptr_t original = reinterpret_cast(&*t.find(2)); + t.clear(); + ASSERT_EQ(capacity, t.capacity()); + for (size_t i = 0; i < max_size; ++i) { + t.insert(i); + } + ASSERT_EQ(capacity, t.capacity()); + intptr_t second = reinterpret_cast(&*t.find(2)); + // We are checking that original and second are close enough to each other + // that they are probably still in the same group. This is not strictly + // guaranteed. + EXPECT_LT(std::abs(original - second), + capacity * sizeof(IntTable::value_type)); +} + +TEST(Table, Erase) { + IntTable t; + EXPECT_TRUE(t.find(0) == t.end()); + auto res = t.emplace(0); + EXPECT_TRUE(res.second); + EXPECT_EQ(1, t.size()); + t.erase(res.first); + EXPECT_EQ(0, t.size()); + EXPECT_TRUE(t.find(0) == t.end()); +} + +TEST(Table, EraseMaintainsValidIterator) { + IntTable t; + const int kNumElements = 100; + for (int i = 0; i < kNumElements; i ++) { + EXPECT_TRUE(t.emplace(i).second); + } + EXPECT_EQ(t.size(), kNumElements); + + int num_erase_calls = 0; + auto it = t.begin(); + while (it != t.end()) { + t.erase(it++); + num_erase_calls++; + } + + EXPECT_TRUE(t.empty()); + EXPECT_EQ(num_erase_calls, kNumElements); +} + +// Collect N bad keys by following algorithm: +// 1. Create an empty table and reserve it to 2 * N. +// 2. Insert N random elements. +// 3. Take first Group::kWidth - 1 to bad_keys array. +// 4. Clear the table without resize. +// 5. 
Go to point 2 while N keys not collected +std::vector CollectBadMergeKeys(size_t N) { + static constexpr int kGroupSize = Group::kWidth - 1; + + auto topk_range = [](size_t b, size_t e, + IntTable* t) -> std::vector { + for (size_t i = b; i != e; ++i) { + t->emplace(i); + } + std::vector res; + res.reserve(kGroupSize); + auto it = t->begin(); + for (size_t i = b; i != e && i != b + kGroupSize; ++i, ++it) { + res.push_back(*it); + } + return res; + }; + + std::vector bad_keys; + bad_keys.reserve(N); + IntTable t; + t.reserve(N * 2); + + for (size_t b = 0; bad_keys.size() < N; b += N) { + auto keys = topk_range(b, b + N, &t); + bad_keys.insert(bad_keys.end(), keys.begin(), keys.end()); + t.erase(t.begin(), t.end()); + EXPECT_TRUE(t.empty()); + } + return bad_keys; +} + +struct ProbeStats { + // Number of elements with specific probe length over all tested tables. + std::vector all_probes_histogram; + // Ratios total_probe_length/size for every tested table. + std::vector single_table_ratios; + + friend ProbeStats operator+(const ProbeStats& a, const ProbeStats& b) { + ProbeStats res = a; + res.all_probes_histogram.resize(std::max(res.all_probes_histogram.size(), + b.all_probes_histogram.size())); + std::transform(b.all_probes_histogram.begin(), b.all_probes_histogram.end(), + res.all_probes_histogram.begin(), + res.all_probes_histogram.begin(), std::plus()); + res.single_table_ratios.insert(res.single_table_ratios.end(), + b.single_table_ratios.begin(), + b.single_table_ratios.end()); + return res; + } + + // Average ratio total_probe_length/size over tables. + double AvgRatio() const { + return std::accumulate(single_table_ratios.begin(), + single_table_ratios.end(), 0.0) / + single_table_ratios.size(); + } + + // Maximum ratio total_probe_length/size over tables. + double MaxRatio() const { + return *std::max_element(single_table_ratios.begin(), + single_table_ratios.end()); + } + + // Percentile ratio total_probe_length/size over tables. + double PercentileRatio(double Percentile = 0.95) const { + auto r = single_table_ratios; + auto mid = r.begin() + static_cast(r.size() * Percentile); + if (mid != r.end()) { + std::nth_element(r.begin(), mid, r.end()); + return *mid; + } else { + return MaxRatio(); + } + } + + // Maximum probe length over all elements and all tables. + size_t MaxProbe() const { return all_probes_histogram.size(); } + + // Fraction of elements with specified probe length. 
+ std::vector ProbeNormalizedHistogram() const { + double total_elements = std::accumulate(all_probes_histogram.begin(), + all_probes_histogram.end(), 0ull); + std::vector res; + for (size_t p : all_probes_histogram) { + res.push_back(p / total_elements); + } + return res; + } + + size_t PercentileProbe(double Percentile = 0.99) const { + size_t idx = 0; + for (double p : ProbeNormalizedHistogram()) { + if (Percentile > p) { + Percentile -= p; + ++idx; + } else { + return idx; + } + } + return idx; + } + + friend std::ostream& operator<<(std::ostream& out, const ProbeStats& s) { + out << "{AvgRatio:" << s.AvgRatio() << ", MaxRatio:" << s.MaxRatio() + << ", PercentileRatio:" << s.PercentileRatio() + << ", MaxProbe:" << s.MaxProbe() << ", Probes=["; + for (double p : s.ProbeNormalizedHistogram()) { + out << p << ","; + } + out << "]}"; + + return out; + } +}; + +struct ExpectedStats { + double avg_ratio; + double max_ratio; + std::vector> pecentile_ratios; + std::vector> pecentile_probes; + + friend std::ostream& operator<<(std::ostream& out, const ExpectedStats& s) { + out << "{AvgRatio:" << s.avg_ratio << ", MaxRatio:" << s.max_ratio + << ", PercentileRatios: ["; + for (auto el : s.pecentile_ratios) { + out << el.first << ":" << el.second << ", "; + } + out << "], PercentileProbes: ["; + for (auto el : s.pecentile_probes) { + out << el.first << ":" << el.second << ", "; + } + out << "]}"; + + return out; + } +}; + +void VerifyStats(size_t size, const ExpectedStats& exp, + const ProbeStats& stats) { + EXPECT_LT(stats.AvgRatio(), exp.avg_ratio) << size << " " << stats; + EXPECT_LT(stats.MaxRatio(), exp.max_ratio) << size << " " << stats; + for (auto pr : exp.pecentile_ratios) { + EXPECT_LE(stats.PercentileRatio(pr.first), pr.second) + << size << " " << pr.first << " " << stats; + } + + for (auto pr : exp.pecentile_probes) { + EXPECT_LE(stats.PercentileProbe(pr.first), pr.second) + << size << " " << pr.first << " " << stats; + } +} + +using ProbeStatsPerSize = std::map; + +// Collect total ProbeStats on num_iters iterations of the following algorithm: +// 1. Create new table and reserve it to keys.size() * 2 +// 2. Insert all keys xored with seed +// 3. Collect ProbeStats from final table. +ProbeStats CollectProbeStatsOnKeysXoredWithSeed( + const std::vector& keys, size_t num_iters) { + const size_t reserve_size = keys.size() * 2; + + ProbeStats stats; + + int64_t seed = 0x71b1a19b907d6e33; + while (num_iters--) { + seed = static_cast(static_cast(seed) * 17 + 13); + IntTable t1; + t1.reserve(reserve_size); + for (const auto& key : keys) { + t1.emplace(key ^ seed); + } + + auto probe_histogram = GetHashtableDebugNumProbesHistogram(t1); + stats.all_probes_histogram.resize( + std::max(stats.all_probes_histogram.size(), probe_histogram.size())); + std::transform(probe_histogram.begin(), probe_histogram.end(), + stats.all_probes_histogram.begin(), + stats.all_probes_histogram.begin(), std::plus()); + + size_t total_probe_seq_length = 0; + for (size_t i = 0; i < probe_histogram.size(); ++i) { + total_probe_seq_length += i * probe_histogram[i]; + } + stats.single_table_ratios.push_back(total_probe_seq_length * 1.0 / + keys.size()); + t1.erase(t1.begin(), t1.end()); + } + return stats; +} + +ExpectedStats XorSeedExpectedStats() { + constexpr bool kRandomizesInserts = +#ifdef NDEBUG + false; +#else // NDEBUG + true; +#endif // NDEBUG + + // The effective load factor is larger in non-opt mode because we insert + // elements out of order. 
+ switch (container_internal::Group::kWidth) { + case 8: + if (kRandomizesInserts) { + return {0.05, + 1.0, + {{0.95, 0.5}}, + {{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}}; + } else { + return {0.05, + 2.0, + {{0.95, 0.1}}, + {{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}}; + } + case 16: + if (kRandomizesInserts) { + return {0.1, + 2.0, + {{0.95, 0.1}}, + {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}}; + } else { + return {0.05, + 1.0, + {{0.95, 0.05}}, + {{0.95, 0}, {0.99, 1}, {0.999, 4}, {0.9999, 10}}}; + } + } + ABSL_RAW_LOG(FATAL, "%s", "Unknown Group width"); + return {}; +} + +// TODO(b/80415403): Figure out why this test is so flaky, esp. on MSVC +TEST(Table, DISABLED_EnsureNonQuadraticTopNXorSeedByProbeSeqLength) { + ProbeStatsPerSize stats; + std::vector sizes = {Group::kWidth << 5, Group::kWidth << 10}; + for (size_t size : sizes) { + stats[size] = + CollectProbeStatsOnKeysXoredWithSeed(CollectBadMergeKeys(size), 200); + } + auto expected = XorSeedExpectedStats(); + for (size_t size : sizes) { + auto& stat = stats[size]; + VerifyStats(size, expected, stat); + } +} + +// Collect total ProbeStats on num_iters iterations of the following algorithm: +// 1. Create new table +// 2. Select 10% of keys and insert 10 elements key * 17 + j * 13 +// 3. Collect ProbeStats from final table +ProbeStats CollectProbeStatsOnLinearlyTransformedKeys( + const std::vector& keys, size_t num_iters) { + ProbeStats stats; + + std::random_device rd; + std::mt19937 rng(rd()); + auto linear_transform = [](size_t x, size_t y) { return x * 17 + y * 13; }; + std::uniform_int_distribution dist(0, keys.size()-1); + while (num_iters--) { + IntTable t1; + size_t num_keys = keys.size() / 10; + size_t start = dist(rng); + for (size_t i = 0; i != num_keys; ++i) { + for (size_t j = 0; j != 10; ++j) { + t1.emplace(linear_transform(keys[(i + start) % keys.size()], j)); + } + } + + auto probe_histogram = GetHashtableDebugNumProbesHistogram(t1); + stats.all_probes_histogram.resize( + std::max(stats.all_probes_histogram.size(), probe_histogram.size())); + std::transform(probe_histogram.begin(), probe_histogram.end(), + stats.all_probes_histogram.begin(), + stats.all_probes_histogram.begin(), std::plus()); + + size_t total_probe_seq_length = 0; + for (size_t i = 0; i < probe_histogram.size(); ++i) { + total_probe_seq_length += i * probe_histogram[i]; + } + stats.single_table_ratios.push_back(total_probe_seq_length * 1.0 / + t1.size()); + t1.erase(t1.begin(), t1.end()); + } + return stats; +} + +ExpectedStats LinearTransformExpectedStats() { + constexpr bool kRandomizesInserts = +#ifdef NDEBUG + false; +#else // NDEBUG + true; +#endif // NDEBUG + + // The effective load factor is larger in non-opt mode because we insert + // elements out of order. + switch (container_internal::Group::kWidth) { + case 8: + if (kRandomizesInserts) { + return {0.1, + 0.5, + {{0.95, 0.3}}, + {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}}; + } else { + return {0.4, + 0.6, + {{0.95, 0.5}}, + {{0.95, 1}, {0.99, 14}, {0.999, 23}, {0.9999, 26}}}; + } + case 16: + if (kRandomizesInserts) { + return {0.1, + 0.4, + {{0.95, 0.3}}, + {{0.95, 1}, {0.99, 2}, {0.999, 9}, {0.9999, 15}}}; + } else { + return {0.05, + 0.2, + {{0.95, 0.1}}, + {{0.95, 0}, {0.99, 1}, {0.999, 6}, {0.9999, 10}}}; + } + } + ABSL_RAW_LOG(FATAL, "%s", "Unknown Group width"); + return {}; +} + +// TODO(b/80415403): Figure out why this test is so flaky. 
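A small worked sketch of the ratio bookkeeping shared by both collectors above: the probe histogram counts elements by probe-sequence length, and each per-table ratio is the total probe length divided by the table size. MeanProbeRatio is a hypothetical helper mirroring that arithmetic; the DISABLED_ test referenced by the TODO above follows immediately after this aside.

    // Sketch only: probe_histogram[i] counts elements whose probe sequence
    // length is i; the ratio pushed into ProbeStats is total length / size.
    #include <cstddef>
    #include <vector>

    double MeanProbeRatio(const std::vector<size_t>& probe_histogram,
                          size_t table_size) {
      size_t total_probe_seq_length = 0;
      for (size_t i = 0; i < probe_histogram.size(); ++i) {
        total_probe_seq_length += i * probe_histogram[i];
      }
      return static_cast<double>(total_probe_seq_length) / table_size;
    }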
+TEST(Table, DISABLED_EnsureNonQuadraticTopNLinearTransformByProbeSeqLength) {
+  ProbeStatsPerSize stats;
+  std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10};
+  for (size_t size : sizes) {
+    stats[size] = CollectProbeStatsOnLinearlyTransformedKeys(
+        CollectBadMergeKeys(size), 300);
+  }
+  auto expected = LinearTransformExpectedStats();
+  for (size_t size : sizes) {
+    auto& stat = stats[size];
+    VerifyStats(size, expected, stat);
+  }
+}
+
+TEST(Table, EraseCollision) {
+  BadTable t;
+
+  // 1 2 3
+  t.emplace(1);
+  t.emplace(2);
+  t.emplace(3);
+  EXPECT_THAT(*t.find(1), 1);
+  EXPECT_THAT(*t.find(2), 2);
+  EXPECT_THAT(*t.find(3), 3);
+  EXPECT_EQ(3, t.size());
+
+  // 1 DELETED 3
+  t.erase(t.find(2));
+  EXPECT_THAT(*t.find(1), 1);
+  EXPECT_TRUE(t.find(2) == t.end());
+  EXPECT_THAT(*t.find(3), 3);
+  EXPECT_EQ(2, t.size());
+
+  // DELETED DELETED 3
+  t.erase(t.find(1));
+  EXPECT_TRUE(t.find(1) == t.end());
+  EXPECT_TRUE(t.find(2) == t.end());
+  EXPECT_THAT(*t.find(3), 3);
+  EXPECT_EQ(1, t.size());
+
+  // DELETED DELETED DELETED
+  t.erase(t.find(3));
+  EXPECT_TRUE(t.find(1) == t.end());
+  EXPECT_TRUE(t.find(2) == t.end());
+  EXPECT_TRUE(t.find(3) == t.end());
+  EXPECT_EQ(0, t.size());
+}
+
+TEST(Table, EraseInsertProbing) {
+  BadTable t(100);
+
+  // 1 2 3 4
+  t.emplace(1);
+  t.emplace(2);
+  t.emplace(3);
+  t.emplace(4);
+
+  // 1 DELETED 3 DELETED
+  t.erase(t.find(2));
+  t.erase(t.find(4));
+
+  // 1 10 3 11 12
+  t.emplace(10);
+  t.emplace(11);
+  t.emplace(12);
+
+  EXPECT_EQ(5, t.size());
+  EXPECT_THAT(t, UnorderedElementsAre(1, 10, 3, 11, 12));
+}
+
+TEST(Table, Clear) {
+  IntTable t;
+  EXPECT_TRUE(t.find(0) == t.end());
+  t.clear();
+  EXPECT_TRUE(t.find(0) == t.end());
+  auto res = t.emplace(0);
+  EXPECT_TRUE(res.second);
+  EXPECT_EQ(1, t.size());
+  t.clear();
+  EXPECT_EQ(0, t.size());
+  EXPECT_TRUE(t.find(0) == t.end());
+}
+
+TEST(Table, Swap) {
+  IntTable t;
+  EXPECT_TRUE(t.find(0) == t.end());
+  auto res = t.emplace(0);
+  EXPECT_TRUE(res.second);
+  EXPECT_EQ(1, t.size());
+  IntTable u;
+  t.swap(u);
+  EXPECT_EQ(0, t.size());
+  EXPECT_EQ(1, u.size());
+  EXPECT_TRUE(t.find(0) == t.end());
+  EXPECT_THAT(*u.find(0), 0);
+}
+
+TEST(Table, Rehash) {
+  IntTable t;
+  EXPECT_TRUE(t.find(0) == t.end());
+  t.emplace(0);
+  t.emplace(1);
+  EXPECT_EQ(2, t.size());
+  t.rehash(128);
+  EXPECT_EQ(2, t.size());
+  EXPECT_THAT(*t.find(0), 0);
+  EXPECT_THAT(*t.find(1), 1);
+}
+
+TEST(Table, RehashDoesNotRehashWhenNotNecessary) {
+  IntTable t;
+  t.emplace(0);
+  t.emplace(1);
+  auto* p = &*t.find(0);
+  t.rehash(1);
+  EXPECT_EQ(p, &*t.find(0));
+}
+
+TEST(Table, RehashZeroDoesNotAllocateOnEmptyTable) {
+  IntTable t;
+  t.rehash(0);
+  EXPECT_EQ(0, t.bucket_count());
+}
+
+TEST(Table, RehashZeroDeallocatesEmptyTable) {
+  IntTable t;
+  t.emplace(0);
+  t.clear();
+  EXPECT_NE(0, t.bucket_count());
+  t.rehash(0);
+  EXPECT_EQ(0, t.bucket_count());
+}
+
+TEST(Table, RehashZeroForcesRehash) {
+  IntTable t;
+  t.emplace(0);
+  t.emplace(1);
+  auto* p = &*t.find(0);
+  t.rehash(0);
+  EXPECT_NE(p, &*t.find(0));
+}
+
+TEST(Table, ConstructFromInitList) {
+  using P = std::pair<std::string, std::string>;
+  struct Q {
+    operator P() const { return {}; }
+  };
+  StringTable t = {P(), Q(), {}, {{}, {}}};
+}
+
+TEST(Table, CopyConstruct) {
+  IntTable t;
+  t.emplace(0);
+  EXPECT_EQ(1, t.size());
+  {
+    IntTable u(t);
+    EXPECT_EQ(1, u.size());
+    EXPECT_THAT(*u.find(0), 0);
+  }
+  {
+    IntTable u{t};
+    EXPECT_EQ(1, u.size());
+    EXPECT_THAT(*u.find(0), 0);
+  }
+  {
+    IntTable u = t;
+    EXPECT_EQ(1, u.size());
+    EXPECT_THAT(*u.find(0), 0);
+  }
+}
+
+TEST(Table, CopyConstructWithAlloc) {
+  StringTable t;
+  t.emplace("a", "b");
+  EXPECT_EQ(1, t.size());
+  StringTable u(t, Alloc<std::pair<std::string, std::string>>());
+  EXPECT_EQ(1, u.size());
+  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+}
+
+struct ExplicitAllocIntTable
+    : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
+                   std::equal_to<int64_t>, Alloc<int64_t>> {
+  ExplicitAllocIntTable() {}
+};
+
+TEST(Table, AllocWithExplicitCtor) {
+  ExplicitAllocIntTable t;
+  EXPECT_EQ(0, t.size());
+}
+
+TEST(Table, MoveConstruct) {
+  {
+    StringTable t;
+    t.emplace("a", "b");
+    EXPECT_EQ(1, t.size());
+
+    StringTable u(std::move(t));
+    EXPECT_EQ(1, u.size());
+    EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+  }
+  {
+    StringTable t;
+    t.emplace("a", "b");
+    EXPECT_EQ(1, t.size());
+
+    StringTable u{std::move(t)};
+    EXPECT_EQ(1, u.size());
+    EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+  }
+  {
+    StringTable t;
+    t.emplace("a", "b");
+    EXPECT_EQ(1, t.size());
+
+    StringTable u = std::move(t);
+    EXPECT_EQ(1, u.size());
+    EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+  }
+}
+
+TEST(Table, MoveConstructWithAlloc) {
+  StringTable t;
+  t.emplace("a", "b");
+  EXPECT_EQ(1, t.size());
+  StringTable u(std::move(t), Alloc<std::pair<std::string, std::string>>());
+  EXPECT_EQ(1, u.size());
+  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+}
+
+TEST(Table, CopyAssign) {
+  StringTable t;
+  t.emplace("a", "b");
+  EXPECT_EQ(1, t.size());
+  StringTable u;
+  u = t;
+  EXPECT_EQ(1, u.size());
+  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+}
+
+TEST(Table, CopySelfAssign) {
+  StringTable t;
+  t.emplace("a", "b");
+  EXPECT_EQ(1, t.size());
+  t = *&t;
+  EXPECT_EQ(1, t.size());
+  EXPECT_THAT(*t.find("a"), Pair("a", "b"));
+}
+
+TEST(Table, MoveAssign) {
+  StringTable t;
+  t.emplace("a", "b");
+  EXPECT_EQ(1, t.size());
+  StringTable u;
+  u = std::move(t);
+  EXPECT_EQ(1, u.size());
+  EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+}
+
+TEST(Table, Equality) {
+  StringTable t;
+  std::vector<std::pair<std::string, std::string>> v = {{"a", "b"},
+                                                        {"aa", "bb"}};
+  t.insert(std::begin(v), std::end(v));
+  StringTable u = t;
+  EXPECT_EQ(u, t);
+}
+
+TEST(Table, Equality2) {
+  StringTable t;
+  std::vector<std::pair<std::string, std::string>> v1 = {{"a", "b"},
+                                                         {"aa", "bb"}};
+  t.insert(std::begin(v1), std::end(v1));
+  StringTable u;
+  std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"},
+                                                         {"aa", "aa"}};
+  u.insert(std::begin(v2), std::end(v2));
+  EXPECT_NE(u, t);
+}
+
+TEST(Table, Equality3) {
+  StringTable t;
+  std::vector<std::pair<std::string, std::string>> v1 = {{"b", "b"},
+                                                         {"bb", "bb"}};
+  t.insert(std::begin(v1), std::end(v1));
+  StringTable u;
+  std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"},
+                                                         {"aa", "aa"}};
+  u.insert(std::begin(v2), std::end(v2));
+  EXPECT_NE(u, t);
+}
+
+TEST(Table, NumDeletedRegression) {
+  IntTable t;
+  t.emplace(0);
+  t.erase(t.find(0));
+  // construct over a deleted slot.
+  t.emplace(0);
+  t.clear();
+}
+
+TEST(Table, FindFullDeletedRegression) {
+  IntTable t;
+  for (int i = 0; i < 1000; ++i) {
+    t.emplace(i);
+    t.erase(t.find(i));
+  }
+  EXPECT_EQ(0, t.size());
+}
+
+TEST(Table, ReplacingDeletedSlotDoesNotRehash) {
+  size_t n;
+  {
+    // Compute n such that n is the maximum number of elements before rehash.
+    IntTable t;
+    t.emplace(0);
+    size_t c = t.bucket_count();
+    for (n = 1; c == t.bucket_count(); ++n) t.emplace(n);
+    --n;
+  }
+  IntTable t;
+  t.rehash(n);
+  const size_t c = t.bucket_count();
+  for (size_t i = 0; i != n; ++i) t.emplace(i);
+  EXPECT_EQ(c, t.bucket_count()) << "rehashing threshold = " << n;
+  t.erase(0);
+  t.emplace(0);
+  EXPECT_EQ(c, t.bucket_count()) << "rehashing threshold = " << n;
+}
+
+TEST(Table, NoThrowMoveConstruct) {
+  ASSERT_TRUE(
+      std::is_nothrow_copy_constructible<absl::Hash<absl::string_view>>::value);
+  ASSERT_TRUE(std::is_nothrow_copy_constructible<
+              std::equal_to<absl::string_view>>::value);
+  ASSERT_TRUE(std::is_nothrow_copy_constructible<std::allocator<int>>::value);
+  EXPECT_TRUE(std::is_nothrow_move_constructible<StringTable>::value);
+}
+
+TEST(Table, NoThrowMoveAssign) {
+  ASSERT_TRUE(
+      std::is_nothrow_move_assignable<absl::Hash<absl::string_view>>::value);
+  ASSERT_TRUE(
+      std::is_nothrow_move_assignable<std::equal_to<absl::string_view>>::value);
+  ASSERT_TRUE(std::is_nothrow_move_assignable<std::allocator<int>>::value);
+  ASSERT_TRUE(
+      absl::allocator_traits<std::allocator<int>>::is_always_equal::value);
+  EXPECT_TRUE(std::is_nothrow_move_assignable<StringTable>::value);
+}
+
+TEST(Table, NoThrowSwappable) {
+  ASSERT_TRUE(
+      container_internal::IsNoThrowSwappable<absl::Hash<absl::string_view>>());
+  ASSERT_TRUE(container_internal::IsNoThrowSwappable<
+              std::equal_to<absl::string_view>>());
+  ASSERT_TRUE(container_internal::IsNoThrowSwappable<std::allocator<int>>());
+  EXPECT_TRUE(container_internal::IsNoThrowSwappable<StringTable>());
+}
+
+TEST(Table, HeterogeneousLookup) {
+  struct Hash {
+    size_t operator()(int64_t i) const { return i; }
+    size_t operator()(double i) const {
+      ADD_FAILURE();
+      return i;
+    }
+  };
+  struct Eq {
+    bool operator()(int64_t a, int64_t b) const { return a == b; }
+    bool operator()(double a, int64_t b) const {
+      ADD_FAILURE();
+      return a == b;
+    }
+    bool operator()(int64_t a, double b) const {
+      ADD_FAILURE();
+      return a == b;
+    }
+    bool operator()(double a, double b) const {
+      ADD_FAILURE();
+      return a == b;
+    }
+  };
+
+  struct THash {
+    using is_transparent = void;
+    size_t operator()(int64_t i) const { return i; }
+    size_t operator()(double i) const { return i; }
+  };
+  struct TEq {
+    using is_transparent = void;
+    bool operator()(int64_t a, int64_t b) const { return a == b; }
+    bool operator()(double a, int64_t b) const { return a == b; }
+    bool operator()(int64_t a, double b) const { return a == b; }
+    bool operator()(double a, double b) const { return a == b; }
+  };
+
+  raw_hash_set<IntPolicy, Hash, Eq, std::allocator<int64_t>> s{0, 1, 2};
+  // It will convert to int64_t before the query.
+  EXPECT_EQ(1, *s.find(double{1.1}));
+
+  raw_hash_set<IntPolicy, THash, TEq, std::allocator<int64_t>> ts{0, 1, 2};
+  // It will try to use the double, and fail to find the object.
+  EXPECT_TRUE(ts.find(1.1) == ts.end());
+}
+
+template <typename U>
+using CallFind = decltype(std::declval<U&>().find(17));
+
+template <typename U>
+using CallErase = decltype(std::declval<U&>().erase(17));
+
+template <typename U>
+using CallExtract = decltype(std::declval<U&>().extract(17));
+
+template <typename U>
+using CallPrefetch = decltype(std::declval<U&>().prefetch(17));
+
+template <typename U>
+using CallCount = decltype(std::declval<U&>().count(17));
+
+template