Media CAS Proxy SDK release: 16.5.0

Buildbot
2021-07-12 21:46:29 +00:00
parent 760d53c347
commit d69222d492
1968 changed files with 638006 additions and 0 deletions

ubuntu/absl/BUILD.bazel (new file)

@@ -0,0 +1,56 @@
#
# Copyright 2017 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package(default_visibility = ["//visibility:public"])
licenses(["notice"]) # Apache 2.0
load(":compiler_config_setting.bzl", "create_llvm_config")
create_llvm_config(
name = "llvm_compiler",
visibility = [":__subpackages__"],
)
config_setting(
name = "osx",
constraint_values = [
"@bazel_tools//platforms:osx",
],
)
config_setting(
name = "ios",
constraint_values = [
"@bazel_tools//platforms:ios",
],
)
config_setting(
name = "windows",
values = {
"cpu": "x64_windows",
},
visibility = [":__subpackages__"],
)
config_setting(
name = "ppc",
values = {
"cpu": "ppc",
},
visibility = [":__subpackages__"],
)

ubuntu/absl/CMakeLists.txt (new file)

@@ -0,0 +1,33 @@
#
# Copyright 2017 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
add_subdirectory(base)
add_subdirectory(algorithm)
add_subdirectory(container)
add_subdirectory(debugging)
add_subdirectory(flags)
add_subdirectory(hash)
add_subdirectory(memory)
add_subdirectory(meta)
add_subdirectory(numeric)
add_subdirectory(random)
add_subdirectory(strings)
add_subdirectory(synchronization)
add_subdirectory(time)
add_subdirectory(types)
add_subdirectory(utility)

ubuntu/absl/algorithm/BUILD.bazel (new file)

@@ -0,0 +1,87 @@
#
# Copyright 2017 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
load(
"//absl:copts/configure_copts.bzl",
"ABSL_DEFAULT_COPTS",
"ABSL_DEFAULT_LINKOPTS",
"ABSL_TEST_COPTS",
)
package(default_visibility = ["//visibility:public"])
licenses(["notice"]) # Apache 2.0
cc_library(
name = "algorithm",
hdrs = ["algorithm.h"],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
)
cc_test(
name = "algorithm_test",
size = "small",
srcs = ["algorithm_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":algorithm",
"@com_google_googletest//:gtest_main",
],
)
cc_test(
name = "algorithm_benchmark",
srcs = ["equal_benchmark.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
tags = ["benchmark"],
deps = [
":algorithm",
"//absl/base:core_headers",
"@com_github_google_benchmark//:benchmark_main",
],
)
cc_library(
name = "container",
hdrs = [
"container.h",
],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":algorithm",
"//absl/base:core_headers",
"//absl/meta:type_traits",
],
)
cc_test(
name = "container_test",
srcs = ["container_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":container",
"//absl/base",
"//absl/base:core_headers",
"//absl/memory",
"//absl/types:span",
"@com_google_googletest//:gtest_main",
],
)

ubuntu/absl/algorithm/CMakeLists.txt (new file)

@@ -0,0 +1,67 @@
#
# Copyright 2017 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
absl_cc_library(
NAME
algorithm
HDRS
"algorithm.h"
COPTS
${ABSL_DEFAULT_COPTS}
PUBLIC
)
absl_cc_test(
NAME
algorithm_test
SRCS
"algorithm_test.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::algorithm
gmock_main
)
absl_cc_library(
NAME
algorithm_container
HDRS
"container.h"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::algorithm
absl::core_headers
absl::meta
PUBLIC
)
absl_cc_test(
NAME
container_test
SRCS
"container_test.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::algorithm_container
absl::base
absl::core_headers
absl::memory
absl::span
gmock_main
)

ubuntu/absl/algorithm/algorithm.h (new file)

@@ -0,0 +1,152 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// -----------------------------------------------------------------------------
// File: algorithm.h
// -----------------------------------------------------------------------------
//
// This header file contains Google extensions to the standard <algorithm> C++
// header.
#ifndef ABSL_ALGORITHM_ALGORITHM_H_
#define ABSL_ALGORITHM_ALGORITHM_H_
#include <algorithm>
#include <iterator>
#include <type_traits>
namespace absl {
inline namespace lts_2019_08_08 {
namespace algorithm_internal {
// Performs comparisons with operator==, similar to C++14's `std::equal_to<>`.
struct EqualTo {
template <typename T, typename U>
bool operator()(const T& a, const U& b) const {
return a == b;
}
};
template <typename InputIter1, typename InputIter2, typename Pred>
bool EqualImpl(InputIter1 first1, InputIter1 last1, InputIter2 first2,
InputIter2 last2, Pred pred, std::input_iterator_tag,
std::input_iterator_tag) {
while (true) {
if (first1 == last1) return first2 == last2;
if (first2 == last2) return false;
if (!pred(*first1, *first2)) return false;
++first1;
++first2;
}
}
template <typename InputIter1, typename InputIter2, typename Pred>
bool EqualImpl(InputIter1 first1, InputIter1 last1, InputIter2 first2,
InputIter2 last2, Pred&& pred, std::random_access_iterator_tag,
std::random_access_iterator_tag) {
return (last1 - first1 == last2 - first2) &&
std::equal(first1, last1, first2, std::forward<Pred>(pred));
}
// When we are using our own internal predicate that just applies operator==, we
// forward to the non-predicate form of std::equal. This enables an optimization
// in libstdc++ that can result in std::memcmp being used for integer types.
template <typename InputIter1, typename InputIter2>
bool EqualImpl(InputIter1 first1, InputIter1 last1, InputIter2 first2,
InputIter2 last2, algorithm_internal::EqualTo /* unused */,
std::random_access_iterator_tag,
std::random_access_iterator_tag) {
return (last1 - first1 == last2 - first2) &&
std::equal(first1, last1, first2);
}
template <typename It>
It RotateImpl(It first, It middle, It last, std::true_type) {
return std::rotate(first, middle, last);
}
template <typename It>
It RotateImpl(It first, It middle, It last, std::false_type) {
std::rotate(first, middle, last);
return std::next(first, std::distance(middle, last));
}
} // namespace algorithm_internal
// Compares two ranges, specified by pairs of iterators, for equality using
// the given predicate, returning true iff for each corresponding pair of
// iterators i1 and i2 in the first and second range respectively,
// pred(*i1, *i2) == true.
//
// This comparison takes at most min(`last1` - `first1`, `last2` - `first2`)
// invocations of the predicate. Additionally, if InputIter1 and InputIter2 are
// both random-access iterators, and `last1` - `first1` != `last2` - `first2`,
// then the predicate is never invoked and the function returns false.
//
// This is a C++11-compatible implementation of C++14 `std::equal`. See
// https://en.cppreference.com/w/cpp/algorithm/equal for more information.
template <typename InputIter1, typename InputIter2, typename Pred>
bool equal(InputIter1 first1, InputIter1 last1, InputIter2 first2,
InputIter2 last2, Pred&& pred) {
return algorithm_internal::EqualImpl(
first1, last1, first2, last2, std::forward<Pred>(pred),
typename std::iterator_traits<InputIter1>::iterator_category{},
typename std::iterator_traits<InputIter2>::iterator_category{});
}
// Performs comparison of two ranges specified by pairs of iterators using
// operator==.
template <typename InputIter1, typename InputIter2>
bool equal(InputIter1 first1, InputIter1 last1, InputIter2 first2,
InputIter2 last2) {
return absl::equal(first1, last1, first2, last2,
algorithm_internal::EqualTo{});
}
// Performs a linear search for `value` using the iterator `first` up to
// but not including `last`, returning true if [`first`, `last`) contains an
// element equal to `value`.
//
// A linear search is of O(n) complexity, and is guaranteed to make at most
// n = (`last` - `first`) comparisons. A linear search over short containers
// may be faster than a binary search, even when the container is sorted.
template <typename InputIterator, typename EqualityComparable>
bool linear_search(InputIterator first, InputIterator last,
const EqualityComparable& value) {
return std::find(first, last, value) != last;
}
// Performs a left rotation on a range of elements [`first`, `last`) such that
// `middle` is now the first element. `rotate()` returns an iterator pointing to
// the first element before rotation. This function is exactly the same as
// `std::rotate`, but fixes a bug in gcc
// <= 4.9 where `std::rotate` returns `void` instead of an iterator.
//
// The complexity of this algorithm is the same as that of `std::rotate`, but if
// `ForwardIterator` is not a random-access iterator, then `absl::rotate`
// performs an additional pass over the range to construct the return value.
template <typename ForwardIterator>
ForwardIterator rotate(ForwardIterator first, ForwardIterator middle,
ForwardIterator last) {
return algorithm_internal::RotateImpl(
first, middle, last,
std::is_same<decltype(std::rotate(first, middle, last)),
ForwardIterator>());
}
} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_ALGORITHM_ALGORITHM_H_
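
A minimal usage sketch of the three entry points above (an illustrative, hypothetical example; it is not part of the vendored release):

#include <list>
#include <vector>
#include "absl/algorithm/algorithm.h"

int main() {
  std::vector<int> v{1, 2, 3};
  std::list<int> l{1, 2, 3};
  // Four-iterator form of equal, as in C++14 std::equal; mixed iterator
  // categories dispatch to the input-iterator implementation.
  const bool same = absl::equal(v.begin(), v.end(), l.begin(), l.end());
  // O(n) membership test; can beat binary search on short ranges.
  const bool has_two = absl::linear_search(v.begin(), v.end(), 2);
  // Rotates {1, 2, 3} to {2, 3, 1}; the return value points at the old
  // front element (the 1), even on gcc <= 4.9.
  auto it = absl::rotate(v.begin(), v.begin() + 1, v.end());
  return (same && has_two && *it == 1) ? 0 : 1;
}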

ubuntu/absl/algorithm/algorithm_test.cc (new file)

@@ -0,0 +1,182 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/algorithm/algorithm.h"
#include <algorithm>
#include <list>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
namespace {
TEST(EqualTest, DefaultComparisonRandomAccess) {
std::vector<int> v1{1, 2, 3};
std::vector<int> v2 = v1;
std::vector<int> v3 = {1, 2};
std::vector<int> v4 = {1, 2, 4};
EXPECT_TRUE(absl::equal(v1.begin(), v1.end(), v2.begin(), v2.end()));
EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), v3.begin(), v3.end()));
EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), v4.begin(), v4.end()));
}
TEST(EqualTest, DefaultComparison) {
std::list<int> lst1{1, 2, 3};
std::list<int> lst2 = lst1;
std::list<int> lst3{1, 2};
std::list<int> lst4{1, 2, 4};
EXPECT_TRUE(absl::equal(lst1.begin(), lst1.end(), lst2.begin(), lst2.end()));
EXPECT_FALSE(absl::equal(lst1.begin(), lst1.end(), lst3.begin(), lst3.end()));
EXPECT_FALSE(absl::equal(lst1.begin(), lst1.end(), lst4.begin(), lst4.end()));
}
TEST(EqualTest, EmptyRange) {
std::vector<int> v1{1, 2, 3};
std::vector<int> empty1;
std::vector<int> empty2;
EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), empty1.begin(), empty1.end()));
EXPECT_FALSE(absl::equal(empty1.begin(), empty1.end(), v1.begin(), v1.end()));
EXPECT_TRUE(
absl::equal(empty1.begin(), empty1.end(), empty2.begin(), empty2.end()));
}
TEST(EqualTest, MixedIterTypes) {
std::vector<int> v1{1, 2, 3};
std::list<int> lst1{v1.begin(), v1.end()};
std::list<int> lst2{1, 2, 4};
std::list<int> lst3{1, 2};
EXPECT_TRUE(absl::equal(v1.begin(), v1.end(), lst1.begin(), lst1.end()));
EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), lst2.begin(), lst2.end()));
EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), lst3.begin(), lst3.end()));
}
TEST(EqualTest, MixedValueTypes) {
std::vector<int> v1{1, 2, 3};
std::vector<char> v2{1, 2, 3};
std::vector<char> v3{1, 2};
std::vector<char> v4{1, 2, 4};
EXPECT_TRUE(absl::equal(v1.begin(), v1.end(), v2.begin(), v2.end()));
EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), v3.begin(), v3.end()));
EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), v4.begin(), v4.end()));
}
TEST(EqualTest, WeirdIterators) {
std::vector<bool> v1{true, false};
std::vector<bool> v2 = v1;
std::vector<bool> v3{true};
std::vector<bool> v4{true, true, true};
EXPECT_TRUE(absl::equal(v1.begin(), v1.end(), v2.begin(), v2.end()));
EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), v3.begin(), v3.end()));
EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), v4.begin(), v4.end()));
}
TEST(EqualTest, CustomComparison) {
int n[] = {1, 2, 3, 4};
std::vector<int*> v1{&n[0], &n[1], &n[2]};
std::vector<int*> v2 = v1;
std::vector<int*> v3{&n[0], &n[1], &n[3]};
std::vector<int*> v4{&n[0], &n[1]};
auto eq = [](int* a, int* b) { return *a == *b; };
EXPECT_TRUE(absl::equal(v1.begin(), v1.end(), v2.begin(), v2.end(), eq));
EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), v3.begin(), v3.end(), eq));
EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), v4.begin(), v4.end(), eq));
}
TEST(EqualTest, MoveOnlyPredicate) {
std::vector<int> v1{1, 2, 3};
std::vector<int> v2{4, 5, 6};
// move-only equality predicate
struct Eq {
Eq() = default;
Eq(Eq &&) = default;
Eq(const Eq &) = delete;
Eq &operator=(const Eq &) = delete;
bool operator()(const int a, const int b) const { return a == b; }
};
EXPECT_TRUE(absl::equal(v1.begin(), v1.end(), v1.begin(), v1.end(), Eq()));
EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), v2.begin(), v2.end(), Eq()));
}
struct CountingTrivialPred {
int* count;
bool operator()(int, int) const {
++*count;
return true;
}
};
TEST(EqualTest, RandomAccessComplexity) {
std::vector<int> v1{1, 1, 3};
std::vector<int> v2 = v1;
std::vector<int> v3{1, 2};
do {
int count = 0;
absl::equal(v1.begin(), v1.end(), v2.begin(), v2.end(),
CountingTrivialPred{&count});
EXPECT_LE(count, 3);
} while (std::next_permutation(v2.begin(), v2.end()));
int count = 0;
absl::equal(v1.begin(), v1.end(), v3.begin(), v3.end(),
CountingTrivialPred{&count});
EXPECT_EQ(count, 0);
}
class LinearSearchTest : public testing::Test {
protected:
LinearSearchTest() : container_{1, 2, 3} {}
static bool Is3(int n) { return n == 3; }
static bool Is4(int n) { return n == 4; }
std::vector<int> container_;
};
TEST_F(LinearSearchTest, linear_search) {
EXPECT_TRUE(absl::linear_search(container_.begin(), container_.end(), 3));
EXPECT_FALSE(absl::linear_search(container_.begin(), container_.end(), 4));
}
TEST_F(LinearSearchTest, linear_searchConst) {
const std::vector<int> *const const_container = &container_;
EXPECT_TRUE(
absl::linear_search(const_container->begin(), const_container->end(), 3));
EXPECT_FALSE(
absl::linear_search(const_container->begin(), const_container->end(), 4));
}
TEST(RotateTest, Rotate) {
std::vector<int> v{0, 1, 2, 3, 4};
EXPECT_EQ(*absl::rotate(v.begin(), v.begin() + 2, v.end()), 0);
EXPECT_THAT(v, testing::ElementsAreArray({2, 3, 4, 0, 1}));
std::list<int> l{0, 1, 2, 3, 4};
EXPECT_EQ(*absl::rotate(l.begin(), std::next(l.begin(), 3), l.end()), 0);
EXPECT_THAT(l, testing::ElementsAreArray({3, 4, 0, 1, 2}));
}
} // namespace

File diff suppressed because it is too large

File diff suppressed because it is too large

ubuntu/absl/algorithm/equal_benchmark.cc (new file)

@@ -0,0 +1,126 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cstdint>
#include <cstring>
#include "benchmark/benchmark.h"
#include "absl/algorithm/algorithm.h"
namespace {
// The range of sequence sizes to benchmark.
constexpr int kMinBenchmarkSize = 1024;
constexpr int kMaxBenchmarkSize = 8 * 1024 * 1024;
// A user-defined type for use in equality benchmarks. Note that we expect
// std::memcmp to win for this type: libstdc++'s std::equal only defers to
// memcmp for integral types. This is because it is not straightforward to
// guarantee that std::memcmp would produce a result "as-if" compared by
// operator== for other types (example gotchas: NaN floats, structs with
// padding).
struct EightBits {
explicit EightBits(int /* unused */) : data(0) {}
bool operator==(const EightBits& rhs) const { return data == rhs.data; }
uint8_t data;
};
template <typename T>
void BM_absl_equal_benchmark(benchmark::State& state) {
std::vector<T> xs(state.range(0), T(0));
std::vector<T> ys = xs;
while (state.KeepRunning()) {
const bool same = absl::equal(xs.begin(), xs.end(), ys.begin(), ys.end());
benchmark::DoNotOptimize(same);
}
}
template <typename T>
void BM_std_equal_benchmark(benchmark::State& state) {
std::vector<T> xs(state.range(0), T(0));
std::vector<T> ys = xs;
while (state.KeepRunning()) {
const bool same = std::equal(xs.begin(), xs.end(), ys.begin());
benchmark::DoNotOptimize(same);
}
}
template <typename T>
void BM_memcmp_benchmark(benchmark::State& state) {
std::vector<T> xs(state.range(0), T(0));
std::vector<T> ys = xs;
while (state.KeepRunning()) {
const bool same =
std::memcmp(xs.data(), ys.data(), xs.size() * sizeof(T)) == 0;
benchmark::DoNotOptimize(same);
}
}
// The expectation is that the compiler should be able to elide the equality
// comparison altogether for sufficiently simple types.
template <typename T>
void BM_absl_equal_self_benchmark(benchmark::State& state) {
std::vector<T> xs(state.range(0), T(0));
while (state.KeepRunning()) {
const bool same = absl::equal(xs.begin(), xs.end(), xs.begin(), xs.end());
benchmark::DoNotOptimize(same);
}
}
BENCHMARK_TEMPLATE(BM_absl_equal_benchmark, uint8_t)
->Range(kMinBenchmarkSize, kMaxBenchmarkSize);
BENCHMARK_TEMPLATE(BM_std_equal_benchmark, uint8_t)
->Range(kMinBenchmarkSize, kMaxBenchmarkSize);
BENCHMARK_TEMPLATE(BM_memcmp_benchmark, uint8_t)
->Range(kMinBenchmarkSize, kMaxBenchmarkSize);
BENCHMARK_TEMPLATE(BM_absl_equal_self_benchmark, uint8_t)
->Range(kMinBenchmarkSize, kMaxBenchmarkSize);
BENCHMARK_TEMPLATE(BM_absl_equal_benchmark, uint16_t)
->Range(kMinBenchmarkSize, kMaxBenchmarkSize);
BENCHMARK_TEMPLATE(BM_std_equal_benchmark, uint16_t)
->Range(kMinBenchmarkSize, kMaxBenchmarkSize);
BENCHMARK_TEMPLATE(BM_memcmp_benchmark, uint16_t)
->Range(kMinBenchmarkSize, kMaxBenchmarkSize);
BENCHMARK_TEMPLATE(BM_absl_equal_self_benchmark, uint16_t)
->Range(kMinBenchmarkSize, kMaxBenchmarkSize);
BENCHMARK_TEMPLATE(BM_absl_equal_benchmark, uint32_t)
->Range(kMinBenchmarkSize, kMaxBenchmarkSize);
BENCHMARK_TEMPLATE(BM_std_equal_benchmark, uint32_t)
->Range(kMinBenchmarkSize, kMaxBenchmarkSize);
BENCHMARK_TEMPLATE(BM_memcmp_benchmark, uint32_t)
->Range(kMinBenchmarkSize, kMaxBenchmarkSize);
BENCHMARK_TEMPLATE(BM_absl_equal_self_benchmark, uint32_t)
->Range(kMinBenchmarkSize, kMaxBenchmarkSize);
BENCHMARK_TEMPLATE(BM_absl_equal_benchmark, uint64_t)
->Range(kMinBenchmarkSize, kMaxBenchmarkSize);
BENCHMARK_TEMPLATE(BM_std_equal_benchmark, uint64_t)
->Range(kMinBenchmarkSize, kMaxBenchmarkSize);
BENCHMARK_TEMPLATE(BM_memcmp_benchmark, uint64_t)
->Range(kMinBenchmarkSize, kMaxBenchmarkSize);
BENCHMARK_TEMPLATE(BM_absl_equal_self_benchmark, uint64_t)
->Range(kMinBenchmarkSize, kMaxBenchmarkSize);
BENCHMARK_TEMPLATE(BM_absl_equal_benchmark, EightBits)
->Range(kMinBenchmarkSize, kMaxBenchmarkSize);
BENCHMARK_TEMPLATE(BM_std_equal_benchmark, EightBits)
->Range(kMinBenchmarkSize, kMaxBenchmarkSize);
BENCHMARK_TEMPLATE(BM_memcmp_benchmark, EightBits)
->Range(kMinBenchmarkSize, kMaxBenchmarkSize);
BENCHMARK_TEMPLATE(BM_absl_equal_self_benchmark, EightBits)
->Range(kMinBenchmarkSize, kMaxBenchmarkSize);
} // namespace

ubuntu/absl/base/BUILD.bazel (new file)

@@ -0,0 +1,572 @@
#
# Copyright 2017 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
load(
"//absl:copts/configure_copts.bzl",
"ABSL_DEFAULT_COPTS",
"ABSL_DEFAULT_LINKOPTS",
"ABSL_EXCEPTIONS_FLAG",
"ABSL_EXCEPTIONS_FLAG_LINKOPTS",
"ABSL_TEST_COPTS",
)
package(default_visibility = ["//visibility:public"])
licenses(["notice"]) # Apache 2.0
cc_library(
name = "atomic_hook",
hdrs = ["internal/atomic_hook.h"],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
visibility = [
"//absl:__subpackages__",
],
)
cc_library(
name = "log_severity",
srcs = ["log_severity.cc"],
hdrs = ["log_severity.h"],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [":core_headers"],
)
cc_library(
name = "raw_logging_internal",
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
visibility = [
"//absl:__subpackages__",
],
)
cc_library(
name = "spinlock_wait",
srcs = [
"internal/spinlock_akaros.inc",
"internal/spinlock_linux.inc",
"internal/spinlock_posix.inc",
"internal/spinlock_wait.cc",
"internal/spinlock_win32.inc",
],
hdrs = [
"internal/scheduling_mode.h",
"internal/spinlock_wait.h",
],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
visibility = [
"//absl/base:__pkg__",
],
deps = [":core_headers"],
)
cc_library(
name = "config",
hdrs = [
"config.h",
"policy_checks.h",
],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
)
cc_library(
name = "dynamic_annotations",
srcs = ["dynamic_annotations.cc"],
hdrs = ["dynamic_annotations.h"],
copts = ABSL_DEFAULT_COPTS,
defines = ["__CLANG_SUPPORT_DYN_ANNOTATION__"],
linkopts = ABSL_DEFAULT_LINKOPTS,
)
cc_library(
name = "core_headers",
srcs = [
"internal/thread_annotations.h",
],
hdrs = [
"attributes.h",
"const_init.h",
"macros.h",
"optimization.h",
"port.h",
"thread_annotations.h",
],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":config",
],
)
cc_library(
name = "malloc_internal",
srcs = [
"internal/low_level_alloc.cc",
],
hdrs = [
"internal/direct_mmap.h",
"internal/low_level_alloc.h",
],
copts = ABSL_DEFAULT_COPTS,
linkopts = select({
"//absl:windows": [],
"//conditions:default": ["-pthread"],
}) + ABSL_DEFAULT_LINKOPTS,
visibility = [
"//absl:__subpackages__",
],
deps = [
":base",
":config",
":core_headers",
":dynamic_annotations",
":spinlock_wait",
],
)
cc_library(
name = "base_internal",
hdrs = [
"internal/hide_ptr.h",
"internal/identity.h",
"internal/inline_variable.h",
"internal/invoke.h",
"internal/scheduling_mode.h",
],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
visibility = [
"//absl:__subpackages__",
],
deps = [
"//absl/meta:type_traits",
],
)
cc_library(
name = "base",
srcs = [
"internal/cycleclock.cc",
"internal/raw_logging.cc",
"internal/spinlock.cc",
"internal/sysinfo.cc",
"internal/thread_identity.cc",
"internal/unscaledcycleclock.cc",
],
hdrs = [
"call_once.h",
"casts.h",
"internal/cycleclock.h",
"internal/low_level_scheduling.h",
"internal/per_thread_tls.h",
"internal/raw_logging.h",
"internal/spinlock.h",
"internal/sysinfo.h",
"internal/thread_identity.h",
"internal/tsan_mutex_interface.h",
"internal/unscaledcycleclock.h",
],
copts = ABSL_DEFAULT_COPTS,
linkopts = select({
"//absl:windows": [],
"//conditions:default": ["-pthread"],
}) + ABSL_DEFAULT_LINKOPTS,
deps = [
":atomic_hook",
":base_internal",
":config",
":core_headers",
":dynamic_annotations",
":log_severity",
":spinlock_wait",
"//absl/meta:type_traits",
],
)
cc_test(
name = "atomic_hook_test",
size = "small",
srcs = ["internal/atomic_hook_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":atomic_hook",
":core_headers",
"@com_google_googletest//:gtest_main",
],
)
cc_test(
name = "bit_cast_test",
size = "small",
srcs = [
"bit_cast_test.cc",
],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":base",
":core_headers",
"@com_google_googletest//:gtest_main",
],
)
cc_library(
name = "throw_delegate",
srcs = ["internal/throw_delegate.cc"],
hdrs = ["internal/throw_delegate.h"],
copts = ABSL_DEFAULT_COPTS + ABSL_EXCEPTIONS_FLAG,
linkopts = ABSL_EXCEPTIONS_FLAG_LINKOPTS + ABSL_DEFAULT_LINKOPTS,
visibility = [
"//absl:__subpackages__",
],
deps = [
":base",
":config",
],
)
cc_test(
name = "throw_delegate_test",
srcs = ["throw_delegate_test.cc"],
copts = ABSL_TEST_COPTS + ABSL_EXCEPTIONS_FLAG,
linkopts = ABSL_EXCEPTIONS_FLAG_LINKOPTS + ABSL_DEFAULT_LINKOPTS,
deps = [
":throw_delegate",
"@com_google_googletest//:gtest_main",
],
)
cc_library(
name = "exception_testing",
testonly = 1,
hdrs = ["internal/exception_testing.h"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
visibility = [
"//absl:__subpackages__",
],
deps = [
":config",
"@com_google_googletest//:gtest",
],
)
cc_library(
name = "pretty_function",
hdrs = ["internal/pretty_function.h"],
linkopts = ABSL_DEFAULT_LINKOPTS,
visibility = ["//absl:__subpackages__"],
)
cc_library(
name = "exception_safety_testing",
testonly = 1,
srcs = ["internal/exception_safety_testing.cc"],
hdrs = ["internal/exception_safety_testing.h"],
copts = ABSL_TEST_COPTS + ABSL_EXCEPTIONS_FLAG,
linkopts = ABSL_EXCEPTIONS_FLAG_LINKOPTS + ABSL_DEFAULT_LINKOPTS,
deps = [
":config",
":pretty_function",
"//absl/memory",
"//absl/meta:type_traits",
"//absl/strings",
"//absl/utility",
"@com_google_googletest//:gtest",
],
)
cc_test(
name = "exception_safety_testing_test",
srcs = ["exception_safety_testing_test.cc"],
copts = ABSL_TEST_COPTS + ABSL_EXCEPTIONS_FLAG,
linkopts = ABSL_EXCEPTIONS_FLAG_LINKOPTS + ABSL_DEFAULT_LINKOPTS,
deps = [
":exception_safety_testing",
"//absl/memory",
"@com_google_googletest//:gtest_main",
],
)
cc_test(
name = "inline_variable_test",
size = "small",
srcs = [
"inline_variable_test.cc",
"inline_variable_test_a.cc",
"inline_variable_test_b.cc",
"internal/inline_variable_testing.h",
],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":base_internal",
"@com_google_googletest//:gtest_main",
],
)
cc_test(
name = "invoke_test",
size = "small",
srcs = ["invoke_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":base_internal",
"//absl/memory",
"//absl/strings",
"@com_google_googletest//:gtest_main",
],
)
# Common test library made available for use in non-absl code that overrides
# AbslInternalSpinLockDelay and AbslInternalSpinLockWake.
cc_library(
name = "spinlock_test_common",
testonly = 1,
srcs = ["spinlock_test_common.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":base",
":core_headers",
":spinlock_wait",
"//absl/synchronization",
"@com_google_googletest//:gtest",
],
alwayslink = 1,
)
cc_test(
name = "spinlock_test",
size = "medium",
srcs = ["spinlock_test_common.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":base",
":core_headers",
":spinlock_wait",
"//absl/synchronization",
"@com_google_googletest//:gtest_main",
],
)
cc_library(
name = "spinlock_benchmark_common",
testonly = 1,
srcs = ["internal/spinlock_benchmark.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
visibility = [
"//absl/base:__pkg__",
],
deps = [
":base",
":base_internal",
"//absl/synchronization",
"@com_github_google_benchmark//:benchmark_main",
],
alwayslink = 1,
)
cc_binary(
name = "spinlock_benchmark",
testonly = 1,
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
visibility = ["//visibility:private"],
deps = [
":spinlock_benchmark_common",
],
)
cc_library(
name = "endian",
hdrs = [
"internal/endian.h",
"internal/unaligned_access.h",
],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":config",
":core_headers",
],
)
cc_test(
name = "endian_test",
srcs = ["internal/endian_test.cc"],
copts = ABSL_TEST_COPTS,
deps = [
":config",
":endian",
"@com_google_googletest//:gtest_main",
],
)
cc_test(
name = "config_test",
srcs = ["config_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":config",
"//absl/synchronization:thread_pool",
"@com_google_googletest//:gtest_main",
],
)
cc_test(
name = "call_once_test",
srcs = ["call_once_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":base",
":core_headers",
"//absl/synchronization",
"@com_google_googletest//:gtest_main",
],
)
cc_test(
name = "raw_logging_test",
srcs = ["raw_logging_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":base",
"//absl/strings",
"@com_google_googletest//:gtest_main",
],
)
cc_test(
name = "sysinfo_test",
size = "small",
srcs = ["internal/sysinfo_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":base",
"//absl/synchronization",
"@com_google_googletest//:gtest_main",
],
)
cc_test(
name = "low_level_alloc_test",
size = "medium",
srcs = ["internal/low_level_alloc_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
tags = ["no_test_ios_x86_64"],
deps = [":malloc_internal"],
)
cc_test(
name = "thread_identity_test",
size = "small",
srcs = ["internal/thread_identity_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":base",
":core_headers",
"//absl/synchronization",
"@com_google_googletest//:gtest_main",
],
)
cc_test(
name = "thread_identity_benchmark",
srcs = ["internal/thread_identity_benchmark.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
tags = ["benchmark"],
visibility = ["//visibility:private"],
deps = [
":base",
"//absl/synchronization",
"@com_github_google_benchmark//:benchmark_main",
],
)
cc_library(
name = "bits",
hdrs = ["internal/bits.h"],
linkopts = ABSL_DEFAULT_LINKOPTS,
visibility = [
"//absl:__subpackages__",
],
deps = [":core_headers"],
)
cc_test(
name = "bits_test",
size = "small",
srcs = ["internal/bits_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":bits",
"@com_google_googletest//:gtest_main",
],
)
cc_library(
name = "scoped_set_env",
testonly = 1,
srcs = ["internal/scoped_set_env.cc"],
hdrs = ["internal/scoped_set_env.h"],
linkopts = ABSL_DEFAULT_LINKOPTS,
visibility = [
"//absl:__subpackages__",
],
deps = [":base"],
)
cc_test(
name = "scoped_set_env_test",
size = "small",
srcs = ["internal/scoped_set_env_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":scoped_set_env",
"@com_google_googletest//:gtest_main",
],
)
cc_test(
name = "log_severity_test",
size = "small",
srcs = ["log_severity_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":base",
":log_severity",
"@com_google_googletest//:gtest_main",
],
)

ubuntu/absl/base/CMakeLists.txt (new file)

@@ -0,0 +1,532 @@
#
# Copyright 2017 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
absl_cc_library(
NAME
atomic_hook
HDRS
"internal/atomic_hook.h"
COPTS
${ABSL_DEFAULT_COPTS}
)
absl_cc_library(
NAME
log_severity
HDRS
"log_severity.h"
SRCS
"log_severity.cc"
DEPS
absl::core_headers
COPTS
${ABSL_DEFAULT_COPTS}
)
absl_cc_library(
NAME
raw_logging_internal
COPTS
${ABSL_DEFAULT_COPTS}
)
absl_cc_library(
NAME
spinlock_wait
HDRS
"internal/scheduling_mode.h"
"internal/spinlock_wait.h"
SRCS
"internal/spinlock_akaros.inc"
"internal/spinlock_linux.inc"
"internal/spinlock_posix.inc"
"internal/spinlock_wait.cc"
"internal/spinlock_win32.inc"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::core_headers
)
absl_cc_library(
NAME
config
HDRS
"config.h"
"policy_checks.h"
COPTS
${ABSL_DEFAULT_COPTS}
PUBLIC
)
absl_cc_library(
NAME
dynamic_annotations
HDRS
"dynamic_annotations.h"
SRCS
"dynamic_annotations.cc"
COPTS
${ABSL_DEFAULT_COPTS}
DEFINES
"__CLANG_SUPPORT_DYN_ANNOTATION__"
PUBLIC
)
absl_cc_library(
NAME
core_headers
HDRS
"attributes.h"
"const_init.h"
"macros.h"
"optimization.h"
"port.h"
"thread_annotations.h"
"internal/thread_annotations.h"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::config
PUBLIC
)
absl_cc_library(
NAME
malloc_internal
HDRS
"internal/direct_mmap.h"
"internal/low_level_alloc.h"
SRCS
"internal/low_level_alloc.cc"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::base
absl::config
absl::core_headers
absl::dynamic_annotations
absl::spinlock_wait
Threads::Threads
)
absl_cc_library(
NAME
base_internal
HDRS
"internal/hide_ptr.h"
"internal/identity.h"
"internal/inline_variable.h"
"internal/invoke.h"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::type_traits
)
absl_cc_library(
NAME
base
HDRS
"call_once.h"
"casts.h"
"internal/cycleclock.h"
"internal/low_level_scheduling.h"
"internal/per_thread_tls.h"
"internal/raw_logging.h"
"internal/spinlock.h"
"internal/sysinfo.h"
"internal/thread_identity.h"
"internal/tsan_mutex_interface.h"
"internal/unscaledcycleclock.h"
"log_severity.h"
SRCS
"internal/cycleclock.cc"
"internal/raw_logging.cc"
"internal/spinlock.cc"
"internal/sysinfo.cc"
"internal/thread_identity.cc"
"internal/unscaledcycleclock.cc"
"log_severity.cc"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::atomic_hook
absl::base_internal
absl::config
absl::core_headers
absl::dynamic_annotations
absl::log_severity
absl::spinlock_wait
absl::type_traits
Threads::Threads
PUBLIC
)
absl_cc_library(
NAME
throw_delegate
HDRS
"internal/throw_delegate.h"
SRCS
"internal/throw_delegate.cc"
COPTS
${ABSL_DEFAULT_COPTS}
${ABSL_EXCEPTIONS_FLAG}
DEPS
absl::base
)
absl_cc_library(
NAME
exception_testing
HDRS
"internal/exception_testing.h"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::config
gtest
TESTONLY
)
absl_cc_library(
NAME
pretty_function
HDRS
"internal/pretty_function.h"
COPTS
${ABSL_DEFAULT_COPTS}
)
absl_cc_library(
NAME
exception_safety_testing
HDRS
"internal/exception_safety_testing.h"
SRCS
"internal/exception_safety_testing.cc"
COPTS
${ABSL_TEST_COPTS}
${ABSL_EXCEPTIONS_FLAG}
LINKOPTS
${ABSL_EXCEPTIONS_FLAG_LINKOPTS}
DEPS
absl::config
absl::pretty_function
absl::memory
absl::meta
absl::strings
absl::utility
gtest
TESTONLY
)
absl_cc_test(
NAME
absl_exception_safety_testing_test
SRCS
"exception_safety_testing_test.cc"
COPTS
${ABSL_TEST_COPTS}
${ABSL_EXCEPTIONS_FLAG}
LINKOPTS
${ABSL_EXCEPTIONS_FLAG_LINKOPTS}
DEPS
absl::exception_safety_testing
absl::memory
gtest_main
)
absl_cc_test(
NAME
atomic_hook_test
SRCS
"internal/atomic_hook_test.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::atomic_hook
absl::core_headers
gtest_main
)
absl_cc_test(
NAME
bit_cast_test
SRCS
"bit_cast_test.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::base
absl::core_headers
gtest_main
)
absl_cc_test(
NAME
throw_delegate_test
SRCS
"throw_delegate_test.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::base
absl::throw_delegate
gtest_main
)
absl_cc_test(
NAME
inline_variable_test
SRCS
"internal/inline_variable_testing.h"
"inline_variable_test.cc"
"inline_variable_test_a.cc"
"inline_variable_test_b.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::base_internal
gtest_main
)
absl_cc_test(
NAME
invoke_test
SRCS
"invoke_test.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::base_internal
absl::memory
absl::strings
gmock
gtest_main
)
absl_cc_library(
NAME
spinlock_test_common
SRCS
"spinlock_test_common.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::base
absl::core_headers
absl::spinlock_wait
absl::synchronization
gtest
TESTONLY
)
# On bazel BUILD this target uses "alwayslink = 1" which is not implemented here
absl_cc_test(
NAME
spinlock_test
SRCS
"spinlock_test_common.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::base
absl::core_headers
absl::spinlock_wait
absl::synchronization
gtest_main
)
absl_cc_library(
NAME
endian
HDRS
"internal/endian.h"
"internal/unaligned_access.h"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::config
absl::core_headers
PUBLIC
)
absl_cc_test(
NAME
endian_test
SRCS
"internal/endian_test.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::base
absl::config
absl::endian
gtest_main
)
absl_cc_test(
NAME
config_test
SRCS
"config_test.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::config
absl::synchronization
gtest_main
)
absl_cc_test(
NAME
call_once_test
SRCS
"call_once_test.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::base
absl::core_headers
absl::synchronization
gtest_main
)
absl_cc_test(
NAME
raw_logging_test
SRCS
"raw_logging_test.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::base
absl::strings
gtest_main
)
absl_cc_test(
NAME
sysinfo_test
SRCS
"internal/sysinfo_test.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::base
absl::synchronization
gtest_main
)
absl_cc_test(
NAME
low_level_alloc_test
SRCS
"internal/low_level_alloc_test.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::malloc_internal
Threads::Threads
)
absl_cc_test(
NAME
thread_identity_test
SRCS
"internal/thread_identity_test.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::base
absl::core_headers
absl::synchronization
Threads::Threads
gtest_main
)
absl_cc_library(
NAME
bits
HDRS
"internal/bits.h"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::core_headers
)
absl_cc_test(
NAME
bits_test
SRCS
"internal/bits_test.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::bits
gtest_main
)
absl_cc_library(
NAME
scoped_set_env
SRCS
"internal/scoped_set_env.cc"
HDRS
"internal/scoped_set_env.h"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::base
)
absl_cc_test(
NAME
scoped_set_env_test
SRCS
"internal/scoped_set_env_test.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::scoped_set_env
gtest_main
)
absl_cc_test(
NAME
cmake_thread_test
SRCS
"internal/cmake_thread_test.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::base
)
absl_cc_test(
NAME
log_severity_test
SRCS
"log_severity_test.cc"
DEPS
absl::base
absl::log_severity
gmock
gtest_main
)

ubuntu/absl/base/attributes.h (new file)

@@ -0,0 +1,608 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// This header file defines macros for declaring attributes for functions,
// types, and variables.
//
// These macros are used within Abseil and allow the compiler to optimize, where
// applicable, certain function calls.
//
// This file is used for both C and C++!
//
// Most macros here are exposing GCC or Clang features, and are stubbed out for
// other compilers.
//
// GCC attributes documentation:
// https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/Function-Attributes.html
// https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/Variable-Attributes.html
// https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/Type-Attributes.html
//
// Most attributes in this file are already supported by GCC 4.7. However, some
// of them are not supported in older versions of Clang. Thus, we check
// `__has_attribute()` first. If the check fails, we check if we are on GCC and
// assume the attribute exists on GCC (which is verified on GCC 4.7).
//
// -----------------------------------------------------------------------------
// Sanitizer Attributes
// -----------------------------------------------------------------------------
//
// Sanitizer-related attributes are not "defined" in this file (and indeed
// are not defined as such in any file). To utilize the following
// sanitizer-related attributes within your builds, define the following macros
// within your build using a `-D` flag, along with the given value for
// `-fsanitize`:
//
// * `ADDRESS_SANITIZER` + `-fsanitize=address` (Clang, GCC 4.8)
// * `MEMORY_SANITIZER` + `-fsanitize=memory` (Clang-only)
// * `THREAD_SANITIZER` + `-fsanitize=thread` (Clang, GCC 4.8+)
// * `UNDEFINED_BEHAVIOR_SANITIZER` + `-fsanitize=undefined` (Clang, GCC 4.9+)
// * `CONTROL_FLOW_INTEGRITY` + `-fsanitize=cfi` (Clang-only)
//
// Example:
//
// // Enable branches in the Abseil code that are tagged for ASan:
// $ bazel build --copt=-DADDRESS_SANITIZER --copt=-fsanitize=address
// --linkopt=-fsanitize=address *target*
//
// Since these macro names are only supported by GCC and Clang, we only check
// for `__GNUC__` (GCC or Clang) and the above macros.
#ifndef ABSL_BASE_ATTRIBUTES_H_
#define ABSL_BASE_ATTRIBUTES_H_
// ABSL_HAVE_ATTRIBUTE
//
// A function-like feature checking macro that is a wrapper around
// `__has_attribute`, which is defined by GCC 5+ and Clang and evaluates to a
// nonzero constant integer if the attribute is supported or 0 if not.
//
// It evaluates to zero if `__has_attribute` is not defined by the compiler.
//
// GCC: https://gcc.gnu.org/gcc-5/changes.html
// Clang: https://clang.llvm.org/docs/LanguageExtensions.html
#ifdef __has_attribute
#define ABSL_HAVE_ATTRIBUTE(x) __has_attribute(x)
#else
#define ABSL_HAVE_ATTRIBUTE(x) 0
#endif
// ABSL_HAVE_CPP_ATTRIBUTE
//
// A function-like feature checking macro that accepts C++11 style attributes.
// It's a wrapper around `__has_cpp_attribute`, defined by ISO C++ SD-6
// (https://en.cppreference.com/w/cpp/experimental/feature_test). If we don't
// find `__has_cpp_attribute`, it will evaluate to 0.
#if defined(__cplusplus) && defined(__has_cpp_attribute)
// NOTE: requiring __cplusplus above should not be necessary, but
// works around https://bugs.llvm.org/show_bug.cgi?id=23435.
#define ABSL_HAVE_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
#else
#define ABSL_HAVE_CPP_ATTRIBUTE(x) 0
#endif
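// Illustrative use of the two feature-test macros above (a hypothetical
// sketch; MY_MAYBE_UNUSED is not part of this header): guard an attribute
// behind a feature check and fall back to expanding to nothing.
//
//   #if ABSL_HAVE_ATTRIBUTE(unused)
//   #define MY_MAYBE_UNUSED __attribute__((unused))
//   #elif ABSL_HAVE_CPP_ATTRIBUTE(maybe_unused)
//   #define MY_MAYBE_UNUSED [[maybe_unused]]
//   #else
//   #define MY_MAYBE_UNUSED
//   #endif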
// -----------------------------------------------------------------------------
// Function Attributes
// -----------------------------------------------------------------------------
//
// GCC: https://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html
// Clang: https://clang.llvm.org/docs/AttributeReference.html
// ABSL_PRINTF_ATTRIBUTE
// ABSL_SCANF_ATTRIBUTE
//
// Tells the compiler to perform `printf` format string checking if the
// compiler supports it; see the 'format' attribute in
// <https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/Function-Attributes.html>.
//
// Note: As the GCC manual states, "[s]ince non-static C++ methods
// have an implicit 'this' argument, the arguments of such methods
// should be counted from two, not one."
#if ABSL_HAVE_ATTRIBUTE(format) || (defined(__GNUC__) && !defined(__clang__))
#define ABSL_PRINTF_ATTRIBUTE(string_index, first_to_check) \
__attribute__((__format__(__printf__, string_index, first_to_check)))
#define ABSL_SCANF_ATTRIBUTE(string_index, first_to_check) \
__attribute__((__format__(__scanf__, string_index, first_to_check)))
#else
#define ABSL_PRINTF_ATTRIBUTE(string_index, first_to_check)
#define ABSL_SCANF_ATTRIBUTE(string_index, first_to_check)
#endif
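// Illustrative declarations (hypothetical, not part of this header) showing
// the "count from two" rule quoted above for non-static member functions:
//
//   void LogF(const char* fmt, ...) ABSL_PRINTF_ATTRIBUTE(1, 2);
//   struct Logger {
//     // The implicit `this` is argument 1, so the format string is 2 and
//     // the variadic arguments start at 3.
//     void Log(const char* fmt, ...) ABSL_PRINTF_ATTRIBUTE(2, 3);
//   };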
// ABSL_ATTRIBUTE_ALWAYS_INLINE
// ABSL_ATTRIBUTE_NOINLINE
//
// Forces functions to either inline or not inline. Introduced in gcc 3.1.
#if ABSL_HAVE_ATTRIBUTE(always_inline) || \
(defined(__GNUC__) && !defined(__clang__))
#define ABSL_ATTRIBUTE_ALWAYS_INLINE __attribute__((always_inline))
#define ABSL_HAVE_ATTRIBUTE_ALWAYS_INLINE 1
#else
#define ABSL_ATTRIBUTE_ALWAYS_INLINE
#endif
#if ABSL_HAVE_ATTRIBUTE(noinline) || (defined(__GNUC__) && !defined(__clang__))
#define ABSL_ATTRIBUTE_NOINLINE __attribute__((noinline))
#define ABSL_HAVE_ATTRIBUTE_NOINLINE 1
#else
#define ABSL_ATTRIBUTE_NOINLINE
#endif
// ABSL_ATTRIBUTE_NO_TAIL_CALL
//
// Prevents the compiler from optimizing away stack frames for functions which
// end in a call to another function.
#if ABSL_HAVE_ATTRIBUTE(disable_tail_calls)
#define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 1
#define ABSL_ATTRIBUTE_NO_TAIL_CALL __attribute__((disable_tail_calls))
#elif defined(__GNUC__) && !defined(__clang__)
#define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 1
#define ABSL_ATTRIBUTE_NO_TAIL_CALL \
__attribute__((optimize("no-optimize-sibling-calls")))
#else
#define ABSL_ATTRIBUTE_NO_TAIL_CALL
#define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 0
#endif
// ABSL_ATTRIBUTE_WEAK
//
// Tags a function as weak for the purposes of compilation and linking.
// Weak attributes currently do not work properly in LLVM's Windows backend,
// so disable them there. See https://bugs.llvm.org/show_bug.cgi?id=37598
// for further information.
#if (ABSL_HAVE_ATTRIBUTE(weak) || \
(defined(__GNUC__) && !defined(__clang__))) && \
!(defined(__llvm__) && defined(_WIN32))
#undef ABSL_ATTRIBUTE_WEAK
#define ABSL_ATTRIBUTE_WEAK __attribute__((weak))
#define ABSL_HAVE_ATTRIBUTE_WEAK 1
#else
#define ABSL_ATTRIBUTE_WEAK
#define ABSL_HAVE_ATTRIBUTE_WEAK 0
#endif
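// Illustrative use of the weak attribute (hypothetical, not part of this
// header): define a default no-op hook that another translation unit may
// replace with a strong definition at link time.
//
//   extern "C" ABSL_ATTRIBUTE_WEAK void MyInitHook() {}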
// ABSL_ATTRIBUTE_NONNULL
//
// Tells the compiler either (a) that a particular function parameter
// should be a non-null pointer, or (b) that all pointer arguments should
// be non-null.
//
// Note: As the GCC manual states, "[s]ince non-static C++ methods
// have an implicit 'this' argument, the arguments of such methods
// should be counted from two, not one."
//
// Args are indexed starting at 1.
//
// For non-static class member functions, the implicit `this` argument
// is arg 1, and the first explicit argument is arg 2. For static class member
// functions, there is no implicit `this`, and the first explicit argument is
// arg 1.
//
// Example:
//
// /* arg_a cannot be null, but arg_b can */
// void Function(void* arg_a, void* arg_b) ABSL_ATTRIBUTE_NONNULL(1);
//
// class C {
// /* arg_a cannot be null, but arg_b can */
// void Method(void* arg_a, void* arg_b) ABSL_ATTRIBUTE_NONNULL(2);
//
// /* arg_a cannot be null, but arg_b can */
// static void StaticMethod(void* arg_a, void* arg_b)
// ABSL_ATTRIBUTE_NONNULL(1);
// };
//
// If no arguments are provided, then all pointer arguments should be non-null.
//
// /* No pointer arguments may be null. */
// void Function(void* arg_a, void* arg_b, int arg_c) ABSL_ATTRIBUTE_NONNULL();
//
// NOTE: The GCC nonnull attribute actually accepts a list of arguments, but
// ABSL_ATTRIBUTE_NONNULL does not.
#if ABSL_HAVE_ATTRIBUTE(nonnull) || (defined(__GNUC__) && !defined(__clang__))
#define ABSL_ATTRIBUTE_NONNULL(arg_index) __attribute__((nonnull(arg_index)))
#else
#define ABSL_ATTRIBUTE_NONNULL(...)
#endif
// ABSL_ATTRIBUTE_NORETURN
//
// Tells the compiler that a given function never returns.
#if ABSL_HAVE_ATTRIBUTE(noreturn) || (defined(__GNUC__) && !defined(__clang__))
#define ABSL_ATTRIBUTE_NORETURN __attribute__((noreturn))
#elif defined(_MSC_VER)
#define ABSL_ATTRIBUTE_NORETURN __declspec(noreturn)
#else
#define ABSL_ATTRIBUTE_NORETURN
#endif
// ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS
//
// Tells the AddressSanitizer (or other memory testing tools) to ignore a given
// function. Useful for cases when a function reads random locations on stack,
// calls _exit from a cloned subprocess, deliberately accesses buffer
// out of bounds or does other scary things with memory.
// NOTE: GCC supports AddressSanitizer(asan) since 4.8.
// https://gcc.gnu.org/gcc-4.8/changes.html
#if defined(__GNUC__)
#define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address))
#else
#define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS
#endif
// ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
//
// Tells the MemorySanitizer to relax the handling of a given function. All
// "Use of uninitialized value" warnings from such functions will be suppressed,
// and all values loaded from memory will be considered fully initialized.
// This attribute is similar to the ADDRESS_SANITIZER attribute above, but deals
// with initialized-ness rather than addressability issues.
// NOTE: MemorySanitizer(msan) is supported by Clang but not GCC.
#if defined(__clang__)
#define ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY __attribute__((no_sanitize_memory))
#else
#define ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
#endif
// ABSL_ATTRIBUTE_NO_SANITIZE_THREAD
//
// Tells the ThreadSanitizer to not instrument a given function.
// NOTE: GCC supports ThreadSanitizer(tsan) since 4.8.
// https://gcc.gnu.org/gcc-4.8/changes.html
#if defined(__GNUC__)
#define ABSL_ATTRIBUTE_NO_SANITIZE_THREAD __attribute__((no_sanitize_thread))
#else
#define ABSL_ATTRIBUTE_NO_SANITIZE_THREAD
#endif
// ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED
//
// Tells the UndefinedSanitizer to ignore a given function. Useful for cases
// where certain behavior (e.g. division by zero) is being used intentionally.
// NOTE: GCC supports UndefinedBehaviorSanitizer(ubsan) since 4.9.
// https://gcc.gnu.org/gcc-4.9/changes.html
#if defined(__GNUC__) && \
(defined(UNDEFINED_BEHAVIOR_SANITIZER) || defined(ADDRESS_SANITIZER))
#define ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED \
__attribute__((no_sanitize("undefined")))
#else
#define ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED
#endif
// ABSL_ATTRIBUTE_NO_SANITIZE_CFI
//
// Tells the ControlFlowIntegrity sanitizer to not instrument a given function.
// See https://clang.llvm.org/docs/ControlFlowIntegrity.html for details.
#if defined(__GNUC__) && defined(CONTROL_FLOW_INTEGRITY)
#define ABSL_ATTRIBUTE_NO_SANITIZE_CFI __attribute__((no_sanitize("cfi")))
#else
#define ABSL_ATTRIBUTE_NO_SANITIZE_CFI
#endif
// ABSL_ATTRIBUTE_NO_SANITIZE_SAFESTACK
//
// Tells the SafeStack to not instrument a given function.
// See https://clang.llvm.org/docs/SafeStack.html for details.
#if defined(__GNUC__) && defined(SAFESTACK_SANITIZER)
#define ABSL_ATTRIBUTE_NO_SANITIZE_SAFESTACK \
__attribute__((no_sanitize("safe-stack")))
#else
#define ABSL_ATTRIBUTE_NO_SANITIZE_SAFESTACK
#endif
// ABSL_ATTRIBUTE_RETURNS_NONNULL
//
// Tells the compiler that a particular function never returns a null pointer.
#if ABSL_HAVE_ATTRIBUTE(returns_nonnull) || \
(defined(__GNUC__) && \
(__GNUC__ > 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9)) && \
!defined(__clang__))
#define ABSL_ATTRIBUTE_RETURNS_NONNULL __attribute__((returns_nonnull))
#else
#define ABSL_ATTRIBUTE_RETURNS_NONNULL
#endif
// ABSL_HAVE_ATTRIBUTE_SECTION
//
// Indicates whether labeled sections are supported. Weak symbol support is
// a prerequisite. Labeled sections are not supported on Darwin/iOS.
#ifdef ABSL_HAVE_ATTRIBUTE_SECTION
#error ABSL_HAVE_ATTRIBUTE_SECTION cannot be directly set
#elif (ABSL_HAVE_ATTRIBUTE(section) || \
(defined(__GNUC__) && !defined(__clang__))) && \
!defined(__APPLE__) && ABSL_HAVE_ATTRIBUTE_WEAK
#define ABSL_HAVE_ATTRIBUTE_SECTION 1
// ABSL_ATTRIBUTE_SECTION
//
// Tells the compiler/linker to put a given function into a section and define
// `__start_ ## name` and `__stop_ ## name` symbols to bracket the section.
// This functionality is supported by the GNU linker. Any function annotated
// with
// `ABSL_ATTRIBUTE_SECTION` must not be inlined, or it will be placed into
// whatever section its caller is placed into.
//
#ifndef ABSL_ATTRIBUTE_SECTION
#define ABSL_ATTRIBUTE_SECTION(name) \
__attribute__((section(#name))) __attribute__((noinline))
#endif
// ABSL_ATTRIBUTE_SECTION_VARIABLE
//
// Tells the compiler/linker to put a given variable into a section and define
// `__start_ ## name` and `__stop_ ## name` symbols to bracket the section.
// This functionality is supported by the GNU linker.
#ifndef ABSL_ATTRIBUTE_SECTION_VARIABLE
#define ABSL_ATTRIBUTE_SECTION_VARIABLE(name) __attribute__((section(#name)))
#endif
// ABSL_DECLARE_ATTRIBUTE_SECTION_VARS
//
// A weak section declaration to be used as a global declaration
// for ABSL_ATTRIBUTE_SECTION_START|STOP(name) to compile and link
// even without functions with ABSL_ATTRIBUTE_SECTION(name).
// ABSL_DEFINE_ATTRIBUTE_SECTION should be in exactly one file; it's
// a no-op on ELF but not on Mach-O.
//
#ifndef ABSL_DECLARE_ATTRIBUTE_SECTION_VARS
#define ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name) \
extern char __start_##name[] ABSL_ATTRIBUTE_WEAK; \
extern char __stop_##name[] ABSL_ATTRIBUTE_WEAK
#endif
#ifndef ABSL_DEFINE_ATTRIBUTE_SECTION_VARS
#define ABSL_INIT_ATTRIBUTE_SECTION_VARS(name)
#define ABSL_DEFINE_ATTRIBUTE_SECTION_VARS(name)
#endif
// ABSL_ATTRIBUTE_SECTION_START
//
// Returns `void*` pointers to start/end of a section of code with
// functions having ABSL_ATTRIBUTE_SECTION(name).
// Returns 0 if no such functions exist.
// One must ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name) for this to compile and
// link.
//
#define ABSL_ATTRIBUTE_SECTION_START(name) \
(reinterpret_cast<void *>(__start_##name))
#define ABSL_ATTRIBUTE_SECTION_STOP(name) \
(reinterpret_cast<void *>(__stop_##name))
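// Illustrative sketch of the section machinery (hypothetical names, not part
// of this header): tag functions with a section, declare the bracketing
// symbols once, then read the [start, stop) range at runtime.
//
//   ABSL_ATTRIBUTE_SECTION(my_tag) void TracedA() {}
//   ABSL_ATTRIBUTE_SECTION(my_tag) void TracedB() {}
//   ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(my_tag);
//   void* start = ABSL_ATTRIBUTE_SECTION_START(my_tag);
//   void* stop = ABSL_ATTRIBUTE_SECTION_STOP(my_tag);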
#else // !ABSL_HAVE_ATTRIBUTE_SECTION
#define ABSL_HAVE_ATTRIBUTE_SECTION 0
// provide dummy definitions
#define ABSL_ATTRIBUTE_SECTION(name)
#define ABSL_ATTRIBUTE_SECTION_VARIABLE(name)
#define ABSL_INIT_ATTRIBUTE_SECTION_VARS(name)
#define ABSL_DEFINE_ATTRIBUTE_SECTION_VARS(name)
#define ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name)
#define ABSL_ATTRIBUTE_SECTION_START(name) (reinterpret_cast<void *>(0))
#define ABSL_ATTRIBUTE_SECTION_STOP(name) (reinterpret_cast<void *>(0))
#endif // ABSL_ATTRIBUTE_SECTION
// ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC
//
// Support for aligning the stack on 32-bit x86.
#if ABSL_HAVE_ATTRIBUTE(force_align_arg_pointer) || \
(defined(__GNUC__) && !defined(__clang__))
#if defined(__i386__)
#define ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC \
__attribute__((force_align_arg_pointer))
#define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (0)
#elif defined(__x86_64__)
#define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (1)
#define ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC
#else // !__i386__ && !__x86_64
#define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (0)
#define ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC
#endif // __i386__
#else
#define ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC
#define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (0)
#endif
// ABSL_MUST_USE_RESULT
//
// Tells the compiler to warn about unused results.
//
// When annotating a function, it must appear as the first part of the
// declaration or definition. The compiler will warn if the return value from
// such a function is unused:
//
// ABSL_MUST_USE_RESULT Sprocket* AllocateSprocket();
// AllocateSprocket(); // Triggers a warning.
//
// When annotating a class, it is equivalent to annotating every function which
// returns an instance.
//
// class ABSL_MUST_USE_RESULT Sprocket {};
// Sprocket(); // Triggers a warning.
//
// Sprocket MakeSprocket();
// MakeSprocket(); // Triggers a warning.
//
// Note that references and pointers are not instances:
//
// Sprocket* SprocketPointer();
// SprocketPointer(); // Does *not* trigger a warning.
//
// ABSL_MUST_USE_RESULT allows using cast-to-void to suppress the unused result
// warning. For that reason, warn_unused_result is used only for Clang, not GCC.
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66425
//
// Note: past advice was to place the macro after the argument list.
#if ABSL_HAVE_ATTRIBUTE(nodiscard)
#define ABSL_MUST_USE_RESULT [[nodiscard]]
#elif defined(__clang__) && ABSL_HAVE_ATTRIBUTE(warn_unused_result)
#define ABSL_MUST_USE_RESULT __attribute__((warn_unused_result))
#else
#define ABSL_MUST_USE_RESULT
#endif
// ABSL_ATTRIBUTE_HOT, ABSL_ATTRIBUTE_COLD
//
// Tells GCC that a function is hot or cold. GCC can use this information to
// improve static analysis; for example, a conditional branch to a cold function
// is likely to be not-taken.
// This annotation is used for function declarations.
//
// Example:
//
// int foo() ABSL_ATTRIBUTE_HOT;
#if ABSL_HAVE_ATTRIBUTE(hot) || (defined(__GNUC__) && !defined(__clang__))
#define ABSL_ATTRIBUTE_HOT __attribute__((hot))
#else
#define ABSL_ATTRIBUTE_HOT
#endif
#if ABSL_HAVE_ATTRIBUTE(cold) || (defined(__GNUC__) && !defined(__clang__))
#define ABSL_ATTRIBUTE_COLD __attribute__((cold))
#else
#define ABSL_ATTRIBUTE_COLD
#endif
// ABSL_XRAY_ALWAYS_INSTRUMENT, ABSL_XRAY_NEVER_INSTRUMENT, ABSL_XRAY_LOG_ARGS
//
// We define the ABSL_XRAY_ALWAYS_INSTRUMENT and ABSL_XRAY_NEVER_INSTRUMENT
// macros, used as attributes, to mark functions that must always or never be
// instrumented by XRay. Currently, this is only supported in Clang/LLVM.
//
// For reference on the LLVM XRay instrumentation, see
// http://llvm.org/docs/XRay.html.
//
// A function with the ABSL_XRAY_ALWAYS_INSTRUMENT macro attribute in its
// declaration
// will always get the XRay instrumentation sleds. These sleds may introduce
// some binary size and runtime overhead and must be used sparingly.
//
// These attributes only take effect when the following conditions are met:
//
// * The file/target is built in at least C++11 mode, with a Clang compiler
// that supports XRay attributes.
// * The file/target is built with the -fxray-instrument flag set for the
// Clang/LLVM compiler.
// * The function is defined in the translation unit (the compiler honors the
// attribute in either the definition or the declaration, and must match).
//
// There are cases when, even when building with XRay instrumentation, users
// might want to control specifically which functions are instrumented for a
// particular build using special-case lists provided to the compiler. These
// special case lists are provided to Clang via the
// -fxray-always-instrument=... and -fxray-never-instrument=... flags. The
// attributes in source take precedence over these special-case lists.
//
// To disable the XRay attributes at build-time, users may define
// ABSL_NO_XRAY_ATTRIBUTES. Do NOT define ABSL_NO_XRAY_ATTRIBUTES on specific
// packages/targets, as this may lead to conflicting definitions of functions at
// link-time.
//
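// Example (an illustrative sketch; `AlwaysTraced` and `NeverTraced` are
// hypothetical functions):
//
//   ABSL_XRAY_ALWAYS_INSTRUMENT void AlwaysTraced();
//   ABSL_XRAY_NEVER_INSTRUMENT void NeverTraced();
//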
#if ABSL_HAVE_CPP_ATTRIBUTE(clang::xray_always_instrument) && \
!defined(ABSL_NO_XRAY_ATTRIBUTES)
#define ABSL_XRAY_ALWAYS_INSTRUMENT [[clang::xray_always_instrument]]
#define ABSL_XRAY_NEVER_INSTRUMENT [[clang::xray_never_instrument]]
#if ABSL_HAVE_CPP_ATTRIBUTE(clang::xray_log_args)
#define ABSL_XRAY_LOG_ARGS(N) \
[[clang::xray_always_instrument, clang::xray_log_args(N)]]
#else
#define ABSL_XRAY_LOG_ARGS(N) [[clang::xray_always_instrument]]
#endif
#else
#define ABSL_XRAY_ALWAYS_INSTRUMENT
#define ABSL_XRAY_NEVER_INSTRUMENT
#define ABSL_XRAY_LOG_ARGS(N)
#endif
// ABSL_ATTRIBUTE_REINITIALIZES
//
// Indicates that a member function reinitializes the entire object to a known
// state, independent of the previous state of the object.
//
// The clang-tidy check bugprone-use-after-move allows member functions marked
// with this attribute to be called on objects that have been moved from;
// without the attribute, this would result in a use-after-move warning.
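//
// Example (an illustrative sketch; `Buffer` is a hypothetical type):
//
//   class Buffer {
//    public:
//     ABSL_ATTRIBUTE_REINITIALIZES void Clear();  // resets to a known state
//   };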
#if ABSL_HAVE_CPP_ATTRIBUTE(clang::reinitializes)
#define ABSL_ATTRIBUTE_REINITIALIZES [[clang::reinitializes]]
#else
#define ABSL_ATTRIBUTE_REINITIALIZES
#endif
// -----------------------------------------------------------------------------
// Variable Attributes
// -----------------------------------------------------------------------------
// ABSL_ATTRIBUTE_UNUSED
//
// Prevents the compiler from complaining about variables that appear unused.
#if ABSL_HAVE_ATTRIBUTE(unused) || (defined(__GNUC__) && !defined(__clang__))
#undef ABSL_ATTRIBUTE_UNUSED
#define ABSL_ATTRIBUTE_UNUSED __attribute__((__unused__))
#else
#define ABSL_ATTRIBUTE_UNUSED
#endif
// ABSL_ATTRIBUTE_INITIAL_EXEC
//
// Tells the compiler to use "initial-exec" mode for a thread-local variable.
// See http://people.redhat.com/drepper/tls.pdf for the gory details.
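//
// Example (an illustrative sketch; `tls_counter` is a hypothetical variable):
//
//   static thread_local int tls_counter ABSL_ATTRIBUTE_INITIAL_EXEC = 0;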
#if ABSL_HAVE_ATTRIBUTE(tls_model) || (defined(__GNUC__) && !defined(__clang__))
#define ABSL_ATTRIBUTE_INITIAL_EXEC __attribute__((tls_model("initial-exec")))
#else
#define ABSL_ATTRIBUTE_INITIAL_EXEC
#endif
// ABSL_ATTRIBUTE_PACKED
//
// Prevents the compiler from padding a structure to its natural alignment.
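//
// Example (an illustrative sketch; `WireHeader` is a hypothetical type):
//
//   struct ABSL_ATTRIBUTE_PACKED WireHeader {
//     uint8_t type;
//     uint32_t length;  // no padding is inserted before this member
//   };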
#if ABSL_HAVE_ATTRIBUTE(packed) || (defined(__GNUC__) && !defined(__clang__))
#define ABSL_ATTRIBUTE_PACKED __attribute__((__packed__))
#else
#define ABSL_ATTRIBUTE_PACKED
#endif
// ABSL_ATTRIBUTE_FUNC_ALIGN
//
// Tells the compiler to align the function start to at least a certain
// alignment boundary.
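//
// Example (an illustrative sketch; `TightLoop` is a hypothetical function):
//
//   ABSL_ATTRIBUTE_FUNC_ALIGN(32) void TightLoop();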
#if ABSL_HAVE_ATTRIBUTE(aligned) || (defined(__GNUC__) && !defined(__clang__))
#define ABSL_ATTRIBUTE_FUNC_ALIGN(bytes) __attribute__((aligned(bytes)))
#else
#define ABSL_ATTRIBUTE_FUNC_ALIGN(bytes)
#endif
// ABSL_CONST_INIT
//
// A variable declaration annotated with the `ABSL_CONST_INIT` attribute will
// not compile (on supported platforms) unless the variable has a constant
// initializer. This is useful for variables with static and thread storage
// duration, because it guarantees that they will not suffer from the so-called
// "static init order fiasco". Prefer to put this attribute on the most visible
// declaration of the variable, if there's more than one, because code that
// accesses the variable can then use the attribute for optimization.
//
// Example:
//
// class MyClass {
// public:
// ABSL_CONST_INIT static MyType my_var;
// };
//
// MyType MyClass::my_var = MakeMyType(...);
//
// Note that this attribute is redundant if the variable is declared constexpr.
#if ABSL_HAVE_CPP_ATTRIBUTE(clang::require_constant_initialization)
// NOLINTNEXTLINE(whitespace/braces)
#define ABSL_CONST_INIT [[clang::require_constant_initialization]]
#else
#define ABSL_CONST_INIT
#endif // ABSL_HAVE_CPP_ATTRIBUTE(clang::require_constant_initialization)
#endif // ABSL_BASE_ATTRIBUTES_H_

View File

@@ -0,0 +1,109 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Unit test for bit_cast template.
#include <cstdint>
#include <cstring>
#include "gtest/gtest.h"
#include "absl/base/casts.h"
#include "absl/base/macros.h"
namespace absl {
inline namespace lts_2019_08_08 {
namespace {
template <int N>
struct marshall { char buf[N]; };
template <typename T>
void TestMarshall(const T values[], int num_values) {
for (int i = 0; i < num_values; ++i) {
T t0 = values[i];
marshall<sizeof(T)> m0 = absl::bit_cast<marshall<sizeof(T)> >(t0);
T t1 = absl::bit_cast<T>(m0);
marshall<sizeof(T)> m1 = absl::bit_cast<marshall<sizeof(T)> >(t1);
ASSERT_EQ(0, memcmp(&t0, &t1, sizeof(T)));
ASSERT_EQ(0, memcmp(&m0, &m1, sizeof(T)));
}
}
// Convert back and forth to an integral type. The C++ standard does
// not guarantee this will work, but we test that this works on all the
// platforms we support.
//
// Likewise, below we make assumptions about sizeof(float) and
// sizeof(double) that the standard does not guarantee, but which hold on the
// platforms we support.
template <typename T, typename I>
void TestIntegral(const T values[], int num_values) {
for (int i = 0; i < num_values; ++i) {
T t0 = values[i];
I i0 = absl::bit_cast<I>(t0);
T t1 = absl::bit_cast<T>(i0);
I i1 = absl::bit_cast<I>(t1);
ASSERT_EQ(0, memcmp(&t0, &t1, sizeof(T)));
ASSERT_EQ(i0, i1);
}
}
TEST(BitCast, Bool) {
static const bool bool_list[] = { false, true };
TestMarshall<bool>(bool_list, ABSL_ARRAYSIZE(bool_list));
}
TEST(BitCast, Int32) {
static const int32_t int_list[] =
{ 0, 1, 100, 2147483647, -1, -100, -2147483647, -2147483647-1 };
TestMarshall<int32_t>(int_list, ABSL_ARRAYSIZE(int_list));
}
TEST(BitCast, Int64) {
static const int64_t int64_list[] =
{ 0, 1, 1LL << 40, -1, -(1LL<<40) };
TestMarshall<int64_t>(int64_list, ABSL_ARRAYSIZE(int64_list));
}
TEST(BitCast, Uint64) {
static const uint64_t uint64_list[] =
{ 0, 1, 1LLU << 40, 1LLU << 63 };
TestMarshall<uint64_t>(uint64_list, ABSL_ARRAYSIZE(uint64_list));
}
TEST(BitCast, Float) {
static const float float_list[] =
{ 0.0f, 1.0f, -1.0f, 10.0f, -10.0f,
1e10f, 1e20f, 1e-10f, 1e-20f,
2.71828f, 3.14159f };
TestMarshall<float>(float_list, ABSL_ARRAYSIZE(float_list));
TestIntegral<float, int>(float_list, ABSL_ARRAYSIZE(float_list));
TestIntegral<float, unsigned>(float_list, ABSL_ARRAYSIZE(float_list));
}
TEST(BitCast, Double) {
static const double double_list[] =
{ 0.0, 1.0, -1.0, 10.0, -10.0,
1e10, 1e100, 1e-10, 1e-100,
2.718281828459045,
3.141592653589793238462643383279502884197169399375105820974944 };
TestMarshall<double>(double_list, ABSL_ARRAYSIZE(double_list));
TestIntegral<double, int64_t>(double_list, ABSL_ARRAYSIZE(double_list));
TestIntegral<double, uint64_t>(double_list, ABSL_ARRAYSIZE(double_list));
}
} // namespace
} // inline namespace lts_2019_08_08
} // namespace absl

View File

@@ -0,0 +1,216 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// -----------------------------------------------------------------------------
// File: call_once.h
// -----------------------------------------------------------------------------
//
// This header file provides an Abseil version of `std::call_once` for invoking
// a given function at most once, across all threads. This Abseil version is
// faster than the C++11 version and incorporates the C++17 argument-passing
// fix, so that (for example) non-const references may be passed to the invoked
// function.
#ifndef ABSL_BASE_CALL_ONCE_H_
#define ABSL_BASE_CALL_ONCE_H_
#include <algorithm>
#include <atomic>
#include <cstdint>
#include <type_traits>
#include <utility>
#include "absl/base/internal/invoke.h"
#include "absl/base/internal/low_level_scheduling.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/scheduling_mode.h"
#include "absl/base/internal/spinlock_wait.h"
#include "absl/base/macros.h"
#include "absl/base/optimization.h"
#include "absl/base/port.h"
namespace absl {
inline namespace lts_2019_08_08 {
class once_flag;
namespace base_internal {
std::atomic<uint32_t>* ControlWord(absl::once_flag* flag);
} // namespace base_internal
// call_once()
//
// For all invocations using a given `once_flag`, invokes a given `fn` exactly
// once across all threads. The first call to `call_once()` with a particular
// `once_flag` argument (that does not throw an exception) will run the
// specified function with the provided `args`; other calls with the same
// `once_flag` argument will not run the function, but will wait
// for the provided function to finish running (if it is still running).
//
// This mechanism provides a safe, simple, and fast mechanism for one-time
// initialization in a multi-threaded process.
//
// Example:
//
// class MyInitClass {
// public:
// ...
// mutable absl::once_flag once_;
//
// MyInitClass* init() const {
// absl::call_once(once_, &MyInitClass::Init, this);
// return ptr_;
// }
// };
//
template <typename Callable, typename... Args>
void call_once(absl::once_flag& flag, Callable&& fn, Args&&... args);
// once_flag
//
// Objects of this type are used to distinguish calls to `call_once()` and
// ensure the provided function is only invoked once across all threads. This
// type is not copyable or movable. However, it has a `constexpr`
// constructor, and is safe to use as a namespace-scoped global variable.
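//
// Example (an illustrative sketch; `g_init_once` and `InitSubsystem` are
// hypothetical names):
//
//   absl::once_flag g_init_once;  // constexpr-initialized, ready for use
//
//   void EnsureInitialized() {
//     absl::call_once(g_init_once, InitSubsystem);
//   }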
class once_flag {
public:
constexpr once_flag() : control_(0) {}
once_flag(const once_flag&) = delete;
once_flag& operator=(const once_flag&) = delete;
private:
friend std::atomic<uint32_t>* base_internal::ControlWord(once_flag* flag);
std::atomic<uint32_t> control_;
};
//------------------------------------------------------------------------------
// End of public interfaces.
// Implementation details follow.
//------------------------------------------------------------------------------
namespace base_internal {
// Like call_once, but uses KERNEL_ONLY scheduling. Intended to be used to
// initialize entities used by the scheduler implementation.
template <typename Callable, typename... Args>
void LowLevelCallOnce(absl::once_flag* flag, Callable&& fn, Args&&... args);
// Disables scheduling while on stack when scheduling mode is non-cooperative.
// No effect for cooperative scheduling modes.
class SchedulingHelper {
public:
explicit SchedulingHelper(base_internal::SchedulingMode mode) : mode_(mode) {
if (mode_ == base_internal::SCHEDULE_KERNEL_ONLY) {
guard_result_ = base_internal::SchedulingGuard::DisableRescheduling();
}
}
~SchedulingHelper() {
if (mode_ == base_internal::SCHEDULE_KERNEL_ONLY) {
base_internal::SchedulingGuard::EnableRescheduling(guard_result_);
}
}
private:
base_internal::SchedulingMode mode_;
bool guard_result_;
};
// Bit patterns for call_once state machine values. Internal implementation
// detail, not for use by clients.
//
// The bit patterns are arbitrarily chosen from unlikely values, to aid in
// debugging. However, kOnceInit must be 0, so that a zero-initialized
// once_flag will be valid for immediate use.
enum {
kOnceInit = 0,
kOnceRunning = 0x65C2937B,
kOnceWaiter = 0x05A308D2,
// A very small constant is chosen for kOnceDone so that it fits in a single
// compare-with-immediate instruction for most common ISAs. This is verified
// for x86, POWER and ARM.
kOnceDone = 221, // Random Number
};
template <typename Callable, typename... Args>
void CallOnceImpl(std::atomic<uint32_t>* control,
base_internal::SchedulingMode scheduling_mode, Callable&& fn,
Args&&... args) {
#ifndef NDEBUG
{
uint32_t old_control = control->load(std::memory_order_acquire);
if (old_control != kOnceInit &&
old_control != kOnceRunning &&
old_control != kOnceWaiter &&
old_control != kOnceDone) {
ABSL_RAW_LOG(FATAL, "Unexpected value for control word: 0x%lx",
static_cast<unsigned long>(old_control)); // NOLINT
}
}
#endif // NDEBUG
static const base_internal::SpinLockWaitTransition trans[] = {
{kOnceInit, kOnceRunning, true},
{kOnceRunning, kOnceWaiter, false},
{kOnceDone, kOnceDone, true}};
// Must do this before potentially modifying control word's state.
base_internal::SchedulingHelper maybe_disable_scheduling(scheduling_mode);
// Short circuit the simplest case to avoid procedure call overhead.
uint32_t old_control = kOnceInit;
if (control->compare_exchange_strong(old_control, kOnceRunning,
std::memory_order_acquire,
std::memory_order_relaxed) ||
base_internal::SpinLockWait(control, ABSL_ARRAYSIZE(trans), trans,
scheduling_mode) == kOnceInit) {
base_internal::Invoke(std::forward<Callable>(fn),
std::forward<Args>(args)...);
old_control = control->load(std::memory_order_relaxed);
control->store(base_internal::kOnceDone, std::memory_order_release);
if (old_control == base_internal::kOnceWaiter) {
base_internal::SpinLockWake(control, true);
}
} // else *control is already kOnceDone
}
inline std::atomic<uint32_t>* ControlWord(once_flag* flag) {
return &flag->control_;
}
template <typename Callable, typename... Args>
void LowLevelCallOnce(absl::once_flag* flag, Callable&& fn, Args&&... args) {
std::atomic<uint32_t>* once = base_internal::ControlWord(flag);
uint32_t s = once->load(std::memory_order_acquire);
if (ABSL_PREDICT_FALSE(s != base_internal::kOnceDone)) {
base_internal::CallOnceImpl(once, base_internal::SCHEDULE_KERNEL_ONLY,
std::forward<Callable>(fn),
std::forward<Args>(args)...);
}
}
} // namespace base_internal
template <typename Callable, typename... Args>
void call_once(absl::once_flag& flag, Callable&& fn, Args&&... args) {
std::atomic<uint32_t>* once = base_internal::ControlWord(&flag);
uint32_t s = once->load(std::memory_order_acquire);
if (ABSL_PREDICT_FALSE(s != base_internal::kOnceDone)) {
base_internal::CallOnceImpl(
once, base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL,
std::forward<Callable>(fn), std::forward<Args>(args)...);
}
}
} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_BASE_CALL_ONCE_H_

View File

@@ -0,0 +1,107 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/base/call_once.h"
#include <thread>
#include <vector>
#include "gtest/gtest.h"
#include "absl/base/attributes.h"
#include "absl/base/const_init.h"
#include "absl/base/thread_annotations.h"
#include "absl/synchronization/mutex.h"
namespace absl {
inline namespace lts_2019_08_08 {
namespace {
absl::once_flag once;
ABSL_CONST_INIT Mutex counters_mu(absl::kConstInit);
int running_thread_count GUARDED_BY(counters_mu) = 0;
int call_once_invoke_count GUARDED_BY(counters_mu) = 0;
int call_once_finished_count GUARDED_BY(counters_mu) = 0;
int call_once_return_count GUARDED_BY(counters_mu) = 0;
bool done_blocking GUARDED_BY(counters_mu) = false;
// Function to be called from absl::call_once. Waits for a notification.
void WaitAndIncrement() {
counters_mu.Lock();
++call_once_invoke_count;
counters_mu.Unlock();
counters_mu.LockWhen(Condition(&done_blocking));
++call_once_finished_count;
counters_mu.Unlock();
}
void ThreadBody() {
counters_mu.Lock();
++running_thread_count;
counters_mu.Unlock();
absl::call_once(once, WaitAndIncrement);
counters_mu.Lock();
++call_once_return_count;
counters_mu.Unlock();
}
// Returns true if all threads are set up for the test.
bool ThreadsAreSetup(void*) EXCLUSIVE_LOCKS_REQUIRED(counters_mu) {
// All ten threads must be running, and WaitAndIncrement should be blocked.
return running_thread_count == 10 && call_once_invoke_count == 1;
}
TEST(CallOnceTest, ExecutionCount) {
std::vector<std::thread> threads;
// Start 10 threads all calling call_once on the same once_flag.
for (int i = 0; i < 10; ++i) {
threads.emplace_back(ThreadBody);
}
// Wait until all ten threads have started, and WaitAndIncrement has been
// invoked.
counters_mu.LockWhen(Condition(ThreadsAreSetup, nullptr));
// WaitAndIncrement should have been invoked by exactly one call_once()
// instance. That thread should be blocking on a notification, and all other
// call_once instances should be blocking as well.
EXPECT_EQ(call_once_invoke_count, 1);
EXPECT_EQ(call_once_finished_count, 0);
EXPECT_EQ(call_once_return_count, 0);
// Allow WaitAndIncrement to finish executing. Once it does, the other
// call_once waiters will be unblocked.
done_blocking = true;
counters_mu.Unlock();
for (std::thread& thread : threads) {
thread.join();
}
counters_mu.Lock();
EXPECT_EQ(call_once_invoke_count, 1);
EXPECT_EQ(call_once_finished_count, 1);
EXPECT_EQ(call_once_return_count, 10);
counters_mu.Unlock();
}
} // namespace
} // inline namespace lts_2019_08_08
} // namespace absl

184
ubuntu/absl/base/casts.h Normal file
View File

@@ -0,0 +1,184 @@
//
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// -----------------------------------------------------------------------------
// File: casts.h
// -----------------------------------------------------------------------------
//
// This header file defines casting templates to fit use cases not covered by
// the standard casts provided in the C++ standard. As with all cast operations,
// use these with caution and only if alternatives do not exist.
#ifndef ABSL_BASE_CASTS_H_
#define ABSL_BASE_CASTS_H_
#include <cstring>
#include <memory>
#include <type_traits>
#include <utility>
#include "absl/base/internal/identity.h"
#include "absl/base/macros.h"
#include "absl/meta/type_traits.h"
namespace absl {
inline namespace lts_2019_08_08 {
namespace internal_casts {
template <class Dest, class Source>
struct is_bitcastable
: std::integral_constant<
bool,
sizeof(Dest) == sizeof(Source) &&
type_traits_internal::is_trivially_copyable<Source>::value &&
type_traits_internal::is_trivially_copyable<Dest>::value &&
std::is_default_constructible<Dest>::value> {};
} // namespace internal_casts
// implicit_cast()
//
// Performs an implicit conversion between types following the language
// rules for implicit conversion; if an implicit conversion is otherwise
// allowed by the language in the given context, this function performs such an
// implicit conversion.
//
// Example:
//
// // If the context allows implicit conversion:
// From from;
// To to = from;
//
// // Such code can be replaced by:
// implicit_cast<To>(from);
//
// An `implicit_cast()` may also be used to annotate numeric type conversions
// that, although safe, may produce compiler warnings (such as `long` to `int`).
// Additionally, an `implicit_cast()` is useful within return statements to
// indicate that a specific implicit conversion is being undertaken.
//
// Example:
//
// return implicit_cast<double>(size_in_bytes) / capacity_;
//
// Annotating code with `implicit_cast()` allows you to explicitly select
// particular overloads and template instantiations, while providing a safer
// cast than `reinterpret_cast()` or `static_cast()`.
//
// Additionally, an `implicit_cast()` can be used to allow upcasting within a
// type hierarchy where incorrect use of `static_cast()` could accidentally
// allow downcasting.
//
// Finally, an `implicit_cast()` can be used to perform implicit conversions
// from unrelated types that otherwise couldn't be implicitly cast directly;
// C++ will normally only implicitly cast "one step" in such conversions.
//
// That is, if C is a type which can be implicitly converted to B, with B being
// a type that can be implicitly converted to A, an `implicit_cast()` can be
// used to convert C to B (which the compiler can then implicitly convert to A
// using language rules).
//
// Example:
//
// // Assume an object C is convertible to B, which is implicitly convertible
// // to A
// A a = implicit_cast<B>(C);
//
// Such implicit cast chaining may be useful within template logic.
template <typename To>
constexpr To implicit_cast(typename absl::internal::identity_t<To> to) {
return to;
}
// bit_cast()
//
// Performs a bitwise cast on a type without changing the underlying bit
// representation of that type's value. The two types must be of the same size
// and both types must be trivially copyable. As with most casts, use with
// caution. A `bit_cast()` might be needed when you need to temporarily treat a
// type as some other type, such as in the following cases:
//
// * Serialization (casting temporarily to `char *` for those purposes is
// always allowed by the C++ standard)
// * Managing the individual bits of a type within mathematical operations
// that are not normally accessible through that type
// * Casting non-pointer types to pointer types (casting the other way is
// allowed by `reinterpret_cast()` but round-trips cannot occur the other
// way).
//
// Example:
//
// float f = 3.14159265358979;
// int i = bit_cast<int32_t>(f);
// // i = 0x40490fdb
//
// Casting non-pointer types to pointer types and then dereferencing them
// traditionally produces undefined behavior.
//
// Example:
//
// // WRONG
// float f = 3.14159265358979; // WRONG
// int i = * reinterpret_cast<int*>(&f); // WRONG
//
// The address-casting method produces undefined behavior according to the ISO
// C++ specification section [basic.lval]. Roughly, this section says: if an
// object in memory has one type, and a program accesses it with a different
// type, the result is undefined behavior for most values of "different type".
//
// Such casting results in type punning: holding an object in memory of one type
// and reading its bits back using a different type. A `bit_cast()` avoids this
// issue by implementing its casts using `memcpy()`, which avoids introducing
// this undefined behavior.
//
// NOTE: The requirements here are more strict than the bit_cast of standard
// proposal p0476 due to the need for workarounds and lack of intrinsics.
// Specifically, this implementation also requires `Dest` to be
// default-constructible.
template <
typename Dest, typename Source,
typename std::enable_if<internal_casts::is_bitcastable<Dest, Source>::value,
int>::type = 0>
inline Dest bit_cast(const Source& source) {
Dest dest;
memcpy(static_cast<void*>(std::addressof(dest)),
static_cast<const void*>(std::addressof(source)), sizeof(dest));
return dest;
}
// NOTE: This overload is only picked if the requirements of bit_cast are not
// met. It is therefore UB, but is provided temporarily as previous versions of
// this function template were unchecked. Do not use this in new code.
template <
typename Dest, typename Source,
typename std::enable_if<
!internal_casts::is_bitcastable<Dest, Source>::value, int>::type = 0>
ABSL_DEPRECATED(
"absl::bit_cast type requirements were violated. Update the types being "
"used such that they are the same size and are both TriviallyCopyable.")
inline Dest bit_cast(const Source& source) {
static_assert(sizeof(Dest) == sizeof(Source),
"Source and destination types should have equal sizes.");
Dest dest;
memcpy(&dest, &source, sizeof(dest));
return dest;
}
} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_BASE_CASTS_H_

472
ubuntu/absl/base/config.h Normal file
View File

@@ -0,0 +1,472 @@
//
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// -----------------------------------------------------------------------------
// File: config.h
// -----------------------------------------------------------------------------
//
// This header file defines a set of macros for checking the presence of
// important compiler and platform features. Such macros can be used to
// produce portable code by parameterizing compilation based on the presence or
// lack of a given feature.
//
// We define a "feature" as some interface we wish to program to: for example,
// a library function or system call. A value of `1` indicates support for
// that feature; any other value indicates the feature support is undefined.
//
// Example:
//
// Suppose a programmer wants to write a program that uses the 'mmap()' system
// call. The Abseil macro for that feature (`ABSL_HAVE_MMAP`) allows you to
// selectively include the `mmap.h` header and bracket code using that feature
// in the macro:
//
// #include "absl/base/config.h"
//
// #ifdef ABSL_HAVE_MMAP
// #include "sys/mman.h"
// #endif //ABSL_HAVE_MMAP
//
// ...
// #ifdef ABSL_HAVE_MMAP
// void *ptr = mmap(...);
// ...
// #endif // ABSL_HAVE_MMAP
#ifndef ABSL_BASE_CONFIG_H_
#define ABSL_BASE_CONFIG_H_
// Included for the __GLIBC__ macro (or similar macros on other systems).
#include <limits.h>
#ifdef __cplusplus
// Included for __GLIBCXX__, _LIBCPP_VERSION
#include <cstddef>
#endif // __cplusplus
#if defined(__APPLE__)
// Included for TARGET_OS_IPHONE, __IPHONE_OS_VERSION_MIN_REQUIRED,
// __IPHONE_8_0.
#include <Availability.h>
#include <TargetConditionals.h>
#endif
#include "absl/base/policy_checks.h"
// -----------------------------------------------------------------------------
// Compiler Feature Checks
// -----------------------------------------------------------------------------
// ABSL_HAVE_BUILTIN()
//
// Checks whether the compiler supports a Clang Feature Checking Macro, and if
// so, checks whether it supports the provided builtin function "x" where x
// is one of the functions noted in
// https://clang.llvm.org/docs/LanguageExtensions.html
//
// Note: Use this macro to avoid an extra level of #ifdef __has_builtin check.
// http://releases.llvm.org/3.3/tools/clang/docs/LanguageExtensions.html
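//
// Example (an illustrative sketch; `MY_LIKELY` is a hypothetical macro):
//
//   #if ABSL_HAVE_BUILTIN(__builtin_expect)
//   #define MY_LIKELY(x) (__builtin_expect(!!(x), 1))
//   #else
//   #define MY_LIKELY(x) (x)
//   #endif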
#ifdef __has_builtin
#define ABSL_HAVE_BUILTIN(x) __has_builtin(x)
#else
#define ABSL_HAVE_BUILTIN(x) 0
#endif
// ABSL_HAVE_TLS is defined to 1 when __thread should be supported.
// We assume __thread is supported on Linux when compiled with Clang or compiled
// against libstdc++ with _GLIBCXX_HAVE_TLS defined.
#ifdef ABSL_HAVE_TLS
#error ABSL_HAVE_TLS cannot be directly set
#elif defined(__linux__) && (defined(__clang__) || defined(_GLIBCXX_HAVE_TLS))
#define ABSL_HAVE_TLS 1
#endif
// ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE
//
// Checks whether `std::is_trivially_destructible<T>` is supported.
//
// Notes: All supported compilers using libc++ support this feature, as does
// gcc >= 4.8.1 using libstdc++, and Visual Studio.
#ifdef ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE
#error ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE cannot be directly set
#elif defined(_LIBCPP_VERSION) || \
(!defined(__clang__) && defined(__GNUC__) && defined(__GLIBCXX__) && \
(__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))) || \
defined(_MSC_VER)
#define ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE 1
#endif
// ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE
//
// Checks whether `std::is_trivially_default_constructible<T>` and
// `std::is_trivially_copy_constructible<T>` are supported.
// ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE
//
// Checks whether `std::is_trivially_copy_assignable<T>` is supported.
// Notes: Clang with libc++ supports these features, as does gcc >= 5.1 with
// either libc++ or libstdc++, and Visual Studio (but not NVCC).
#if defined(ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE)
#error ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE cannot be directly set
#elif defined(ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE)
#error ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE cannot be directly set
#elif (defined(__clang__) && defined(_LIBCPP_VERSION)) || \
(!defined(__clang__) && defined(__GNUC__) && \
(__GNUC__ > 5 || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1)) && \
(defined(_LIBCPP_VERSION) || defined(__GLIBCXX__))) || \
(defined(_MSC_VER) && !defined(__NVCC__))
#define ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE 1
#define ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE 1
#endif
// ABSL_HAVE_THREAD_LOCAL
//
// Checks whether C++11's `thread_local` storage duration specifier is
// supported.
#ifdef ABSL_HAVE_THREAD_LOCAL
#error ABSL_HAVE_THREAD_LOCAL cannot be directly set
#elif defined(__APPLE__)
// Notes:
// * Xcode's clang did not support `thread_local` until version 8, and
// even then not for all iOS < 9.0.
// * Xcode 9.3 started disallowing `thread_local` for 32-bit iOS simulator
// targeting iOS 9.x.
// * Xcode 10 moves the deployment target check for iOS < 9.0 to link time
// making __has_feature unreliable there.
//
// Otherwise, `__has_feature` is only supported by Clang so it has to be inside
// the `defined(__APPLE__)` check.
#if __has_feature(cxx_thread_local) && \
!(TARGET_OS_IPHONE && __IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_9_0)
#define ABSL_HAVE_THREAD_LOCAL 1
#endif
#else // !defined(__APPLE__)
#define ABSL_HAVE_THREAD_LOCAL 1
#endif
// There are platforms for which TLS should not be used even though the compiler
// makes it seem like it's supported (Android NDK < r12b for example).
// This is primarily because of linker problems and toolchain misconfiguration:
// Abseil does not intend to support this indefinitely. Currently, the newest
// toolchain we intend to support that requires this behavior is the r11 NDK;
// allowing for a five-year support window on that toolchain means this option
// is likely to be removed around June of 2021.
// TLS isn't supported until NDK r12b per
// https://developer.android.com/ndk/downloads/revision_history.html
// Since NDK r16, `__NDK_MAJOR__` and `__NDK_MINOR__` are defined in
// <android/ndk-version.h>. For NDK < r16, users should define these macros,
// e.g. `-D__NDK_MAJOR__=11 -D__NDK_MINOR__=0` for NDK r11.
#if defined(__ANDROID__) && defined(__clang__)
#if __has_include(<android/ndk-version.h>)
#include <android/ndk-version.h>
#endif // __has_include(<android/ndk-version.h>)
#if defined(__ANDROID__) && defined(__clang__) && defined(__NDK_MAJOR__) && \
defined(__NDK_MINOR__) && \
((__NDK_MAJOR__ < 12) || ((__NDK_MAJOR__ == 12) && (__NDK_MINOR__ < 1)))
#undef ABSL_HAVE_TLS
#undef ABSL_HAVE_THREAD_LOCAL
#endif
#endif // defined(__ANDROID__) && defined(__clang__)
// Emscripten doesn't yet support `thread_local` or `__thread`.
// https://github.com/emscripten-core/emscripten/issues/3502
#if defined(__EMSCRIPTEN__)
#undef ABSL_HAVE_TLS
#undef ABSL_HAVE_THREAD_LOCAL
#endif // defined(__EMSCRIPTEN__)
// ABSL_HAVE_INTRINSIC_INT128
//
// Checks whether the __int128 compiler extension for a 128-bit integral type is
// supported.
//
// Note: __SIZEOF_INT128__ is defined by Clang and GCC when __int128 is
// supported, but we avoid using it in certain cases:
// * On Clang:
// * Building using Clang for Windows, where the Clang runtime library has
// 128-bit support only on LP64 architectures, but Windows is LLP64.
// * On Nvidia's nvcc:
// * nvcc also defines __GNUC__ and __SIZEOF_INT128__, but not all versions
// actually support __int128.
#ifdef ABSL_HAVE_INTRINSIC_INT128
#error ABSL_HAVE_INTRINSIC_INT128 cannot be directly set
#elif defined(__SIZEOF_INT128__)
#if (defined(__clang__) && !defined(_WIN32)) || \
(defined(__CUDACC__) && __CUDACC_VER_MAJOR__ >= 9) || \
(defined(__GNUC__) && !defined(__clang__) && !defined(__CUDACC__))
#define ABSL_HAVE_INTRINSIC_INT128 1
#elif defined(__CUDACC__)
// __CUDACC_VER__ is a full version number before CUDA 9, and is defined to a
// string explaining that it has been removed starting with CUDA 9. We use
// nested #ifs because there is no short-circuiting in the preprocessor.
// NOTE: `__CUDACC__` could be undefined while `__CUDACC_VER__` is defined.
#if __CUDACC_VER__ >= 70000
#define ABSL_HAVE_INTRINSIC_INT128 1
#endif // __CUDACC_VER__ >= 70000
#endif // defined(__CUDACC__)
#endif // ABSL_HAVE_INTRINSIC_INT128
// ABSL_HAVE_EXCEPTIONS
//
// Checks whether the compiler both supports and enables exceptions. Many
// compilers support a "no exceptions" mode that disables exceptions.
//
// Generally, when ABSL_HAVE_EXCEPTIONS is not defined:
//
// * Code using `throw` and `try` may not compile.
// * The `noexcept` specifier will still compile and behave as normal.
// * The `noexcept` operator may still return `false`.
//
// For further details, consult the compiler's documentation.
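//
// Example (an illustrative sketch; `DoWork` is a hypothetical function):
//
//   #ifdef ABSL_HAVE_EXCEPTIONS
//   try { DoWork(); } catch (const std::exception& e) { /* handle */ }
//   #else
//   DoWork();  // Failures must be reported without throwing.
//   #endif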
#ifdef ABSL_HAVE_EXCEPTIONS
#error ABSL_HAVE_EXCEPTIONS cannot be directly set.
#elif defined(__clang__)
// TODO(calabrese)
// Switch to using __cpp_exceptions when we no longer support versions < 3.6.
// For details on this check, see:
// http://releases.llvm.org/3.6.0/tools/clang/docs/ReleaseNotes.html#the-exceptions-macro
#if defined(__EXCEPTIONS) && __has_feature(cxx_exceptions)
#define ABSL_HAVE_EXCEPTIONS 1
#endif // defined(__EXCEPTIONS) && __has_feature(cxx_exceptions)
// Handle remaining special cases and default to exceptions being supported.
#elif !(defined(__GNUC__) && (__GNUC__ < 5) && !defined(__EXCEPTIONS)) && \
!(defined(__GNUC__) && (__GNUC__ >= 5) && !defined(__cpp_exceptions)) && \
!(defined(_MSC_VER) && !defined(_CPPUNWIND))
#define ABSL_HAVE_EXCEPTIONS 1
#endif
// -----------------------------------------------------------------------------
// Platform Feature Checks
// -----------------------------------------------------------------------------
// Currently supported operating systems and associated preprocessor
// symbols:
//
// Linux and Linux-derived __linux__
// Android __ANDROID__ (implies __linux__)
// Linux (non-Android) __linux__ && !__ANDROID__
// Darwin (macOS and iOS) __APPLE__
// Akaros (http://akaros.org) __ros__
// Windows _WIN32
// NaCL __native_client__
// AsmJS __asmjs__
// WebAssembly __wasm__
// Fuchsia __Fuchsia__
//
// Note that since Android defines both __ANDROID__ and __linux__, one
// may probe for either Linux or Android by simply testing for __linux__.
// ABSL_HAVE_MMAP
//
// Checks whether the platform has an mmap(2) implementation as defined in
// POSIX.1-2001.
#ifdef ABSL_HAVE_MMAP
#error ABSL_HAVE_MMAP cannot be directly set
#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \
defined(__ros__) || defined(__native_client__) || defined(__asmjs__) || \
defined(__wasm__) || defined(__Fuchsia__) || defined(__sun) || \
defined(__ASYLO__)
#define ABSL_HAVE_MMAP 1
#endif
// ABSL_HAVE_PTHREAD_GETSCHEDPARAM
//
// Checks whether the platform implements the pthread_(get|set)schedparam(3)
// functions as defined in POSIX.1-2001.
#ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
#error ABSL_HAVE_PTHREAD_GETSCHEDPARAM cannot be directly set
#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \
defined(__ros__)
#define ABSL_HAVE_PTHREAD_GETSCHEDPARAM 1
#endif
// ABSL_HAVE_SCHED_YIELD
//
// Checks whether the platform implements sched_yield(2) as defined in
// POSIX.1-2001.
#ifdef ABSL_HAVE_SCHED_YIELD
#error ABSL_HAVE_SCHED_YIELD cannot be directly set
#elif defined(__linux__) || defined(__ros__) || defined(__native_client__)
#define ABSL_HAVE_SCHED_YIELD 1
#endif
// ABSL_HAVE_SEMAPHORE_H
//
// Checks whether the platform supports the <semaphore.h> header and sem_open(3)
// family of functions as standardized in POSIX.1-2001.
//
// Note: While Apple provides <semaphore.h> for both iOS and macOS, it is
// explicitly deprecated and will cause build failures if enabled for those
// platforms. We side-step the issue by not defining it here for Apple
// platforms.
#ifdef ABSL_HAVE_SEMAPHORE_H
#error ABSL_HAVE_SEMAPHORE_H cannot be directly set
#elif defined(__linux__) || defined(__ros__)
#define ABSL_HAVE_SEMAPHORE_H 1
#endif
// ABSL_HAVE_ALARM
//
// Checks whether the platform supports the <signal.h> header and alarm(2)
// function as standardized in POSIX.1-2001.
#ifdef ABSL_HAVE_ALARM
#error ABSL_HAVE_ALARM cannot be directly set
#elif defined(__GOOGLE_GRTE_VERSION__)
// feature tests for Google's GRTE
#define ABSL_HAVE_ALARM 1
#elif defined(__GLIBC__)
// feature test for glibc
#define ABSL_HAVE_ALARM 1
#elif defined(_MSC_VER)
// feature tests for Microsoft's library
#elif defined(__EMSCRIPTEN__)
// emscripten doesn't support signals
#elif defined(__native_client__)
#else
// other standard libraries
#define ABSL_HAVE_ALARM 1
#endif
// ABSL_IS_LITTLE_ENDIAN
// ABSL_IS_BIG_ENDIAN
//
// Checks the endianness of the platform.
//
// Notes: uses the built-in endian macros provided by GCC (since 4.6) and
// Clang (since 3.2); see
// https://gcc.gnu.org/onlinedocs/cpp/Common-Predefined-Macros.html.
// Otherwise, if _WIN32, assume little endian. Otherwise, bail with an error.
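//
// Example (illustrative):
//
//   #if defined(ABSL_IS_LITTLE_ENDIAN)
//   // ... little-endian byte-order logic ...
//   #elif defined(ABSL_IS_BIG_ENDIAN)
//   // ... big-endian byte-order logic ...
//   #endif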
#if defined(ABSL_IS_BIG_ENDIAN)
#error "ABSL_IS_BIG_ENDIAN cannot be directly set."
#endif
#if defined(ABSL_IS_LITTLE_ENDIAN)
#error "ABSL_IS_LITTLE_ENDIAN cannot be directly set."
#endif
#if (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && \
__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
#define ABSL_IS_LITTLE_ENDIAN 1
#elif defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define ABSL_IS_BIG_ENDIAN 1
#elif defined(_WIN32)
#define ABSL_IS_LITTLE_ENDIAN 1
#else
#error "absl endian detection needs to be set up for your compiler"
#endif
// macOS 10.13 and iOS 10.11 don't let you use <any>, <optional>, or <variant>
// even though the headers exist and are publicly noted to work. See
// https://github.com/abseil/abseil-cpp/issues/207 and
// https://developer.apple.com/documentation/xcode_release_notes/xcode_10_release_notes
// libc++ spells out the availability requirements in the file
// llvm-project/libcxx/include/__config via the #define
// _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS.
#if defined(__APPLE__) && defined(_LIBCPP_VERSION) && \
((defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \
__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101400) || \
(defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && \
__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 120000) || \
(defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && \
__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 120000) || \
(defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && \
__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 50000))
#define ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE 1
#else
#define ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE 0
#endif
// ABSL_HAVE_STD_ANY
//
// Checks whether C++17 std::any is available by checking whether <any> exists.
#ifdef ABSL_HAVE_STD_ANY
#error "ABSL_HAVE_STD_ANY cannot be directly set."
#endif
#ifdef __has_include
#if __has_include(<any>) && __cplusplus >= 201703L && \
!ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
#define ABSL_HAVE_STD_ANY 1
#endif
#endif
// ABSL_HAVE_STD_OPTIONAL
//
// Checks whether C++17 std::optional is available.
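//
// Example (an illustrative sketch; `MaybeInt` is a hypothetical alias):
//
//   #ifdef ABSL_HAVE_STD_OPTIONAL
//   #include <optional>
//   using MaybeInt = std::optional<int>;
//   #endif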
#ifdef ABSL_HAVE_STD_OPTIONAL
#error "ABSL_HAVE_STD_OPTIONAL cannot be directly set."
#endif
#ifdef __has_include
#if __has_include(<optional>) && __cplusplus >= 201703L && \
!ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
#define ABSL_HAVE_STD_OPTIONAL 1
#endif
#endif
// ABSL_HAVE_STD_VARIANT
//
// Checks whether C++17 std::variant is available.
#ifdef ABSL_HAVE_STD_VARIANT
#error "ABSL_HAVE_STD_VARIANT cannot be directly set."
#endif
#ifdef __has_include
#if __has_include(<variant>) && __cplusplus >= 201703L && \
!ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
#define ABSL_HAVE_STD_VARIANT 1
#endif
#endif
// ABSL_HAVE_STD_STRING_VIEW
//
// Checks whether C++17 std::string_view is available.
#ifdef ABSL_HAVE_STD_STRING_VIEW
#error "ABSL_HAVE_STD_STRING_VIEW cannot be directly set."
#endif
#ifdef __has_include
#if __has_include(<string_view>) && __cplusplus >= 201703L
#define ABSL_HAVE_STD_STRING_VIEW 1
#endif
#endif
// For MSVC, `__has_include` is supported in VS 2017 15.3, which is later than
// the support for <optional>, <any>, <string_view>, <variant>. So we use
// _MSC_VER to check whether we have VS 2017 RTM (when <optional>, <any>,
// <string_view>, <variant> are implemented) or higher. Also, `__cplusplus` is
// not correctly set by MSVC, so we use `_MSVC_LANG` to check the language
// version.
// TODO(zhangxy): fix tests before enabling aliasing for `std::any`.
#if defined(_MSC_VER) && _MSC_VER >= 1910 && \
((defined(_MSVC_LANG) && _MSVC_LANG > 201402) || __cplusplus > 201402)
// #define ABSL_HAVE_STD_ANY 1
#define ABSL_HAVE_STD_OPTIONAL 1
#define ABSL_HAVE_STD_VARIANT 1
#define ABSL_HAVE_STD_STRING_VIEW 1
#endif
// In debug mode, MSVC 2017's std::variant throws an EXCEPTION_ACCESS_VIOLATION
// SEH exception from emplace for variant<SomeStruct> when constructing the
// struct can throw. This defeats some of variant_test and
// variant_exception_safety_test.
#if defined(_MSC_VER) && _MSC_VER >= 1700 && defined(_DEBUG)
#define ABSL_INTERNAL_MSVC_2017_DBG_MODE
#endif
#endif // ABSL_BASE_CONFIG_H_

View File

@@ -0,0 +1,60 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/base/config.h"
#include <cstdint>
#include "gtest/gtest.h"
#include "absl/synchronization/internal/thread_pool.h"
namespace {
TEST(ConfigTest, Endianness) {
union {
uint32_t value;
uint8_t data[sizeof(uint32_t)];
} number;
number.data[0] = 0x00;
number.data[1] = 0x01;
number.data[2] = 0x02;
number.data[3] = 0x03;
#if defined(ABSL_IS_LITTLE_ENDIAN) && defined(ABSL_IS_BIG_ENDIAN)
#error Both ABSL_IS_LITTLE_ENDIAN and ABSL_IS_BIG_ENDIAN are defined
#elif defined(ABSL_IS_LITTLE_ENDIAN)
EXPECT_EQ(UINT32_C(0x03020100), number.value);
#elif defined(ABSL_IS_BIG_ENDIAN)
EXPECT_EQ(UINT32_C(0x00010203), number.value);
#else
#error Unknown endianness
#endif
}
#if defined(ABSL_HAVE_THREAD_LOCAL)
TEST(ConfigTest, ThreadLocal) {
static thread_local int mine_mine_mine = 16;
EXPECT_EQ(16, mine_mine_mine);
{
absl::synchronization_internal::ThreadPool pool(1);
pool.Schedule([&] {
EXPECT_EQ(16, mine_mine_mine);
mine_mine_mine = 32;
EXPECT_EQ(32, mine_mine_mine);
});
}
EXPECT_EQ(16, mine_mine_mine);
}
#endif
} // namespace

View File

@@ -0,0 +1,74 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// -----------------------------------------------------------------------------
// kConstInit
// -----------------------------------------------------------------------------
//
// A constructor tag used to mark an object as safe for use as a global
// variable, avoiding the usual lifetime issues that can affect globals.
#ifndef ABSL_BASE_CONST_INIT_H_
#define ABSL_BASE_CONST_INIT_H_
// In general, objects with static storage duration (such as global variables)
// can trigger tricky object lifetime situations. Attempting to access them
// from the constructors or destructors of other global objects can result in
// undefined behavior, unless their constructors and destructors are designed
// with this issue in mind.
//
// The normal way to deal with this issue in C++11 is to use constant
// initialization and trivial destructors.
//
// Constant initialization is guaranteed to occur before any other code
// executes. Constructors that are declared 'constexpr' are eligible for
// constant initialization. You can annotate a variable declaration with the
// ABSL_CONST_INIT macro to express this intent. For compilers that support
// it, this annotation will cause a compilation error for declarations that
// aren't subject to constant initialization (perhaps because a runtime value
// was passed as a constructor argument).
//
// On program shutdown, lifetime issues can be avoided on global objects by
// ensuring that they contain trivial destructors. A class has a trivial
// destructor unless it has a user-defined destructor, a virtual method or base
// class, or a data member or base class with a non-trivial destructor of its
// own. Objects with static storage duration and a trivial destructor are not
// cleaned up on program shutdown, and are thus safe to access from other code
// running during shutdown.
//
// For a few core Abseil classes, we make a best effort to allow for safe global
// instances, even though these classes have non-trivial destructors. These
// objects can be created with the absl::kConstInit tag. For example:
// ABSL_CONST_INIT absl::Mutex global_mutex(absl::kConstInit);
//
// The line above declares a global variable of type absl::Mutex which can be
// accessed at any point during startup or shutdown. global_mutex's destructor
// will still run, but will not invalidate the object. Note that C++ specifies
// that accessing an object after its destructor has run results in undefined
// behavior, but this pattern works on the toolchains we support.
//
// The absl::kConstInit tag should only be used to define objects with static
// or thread_local storage duration.
namespace absl {
inline namespace lts_2019_08_08 {
enum ConstInitType {
kConstInit,
};
} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_BASE_CONST_INIT_H_

View File

@@ -0,0 +1,129 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <stdlib.h>
#include <string.h>
#include "absl/base/dynamic_annotations.h"
#ifndef __has_feature
#define __has_feature(x) 0
#endif
/* Compiler-based ThreadSanitizer defines
DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL = 1
and provides its own definitions of the functions. */
#ifndef DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL
# define DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL 0
#endif
/* Each function is empty and called (via a macro) only in debug mode.
The arguments are captured by dynamic tools at runtime. */
#if DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL == 0 && !defined(__native_client__)
#if __has_feature(memory_sanitizer)
#include <sanitizer/msan_interface.h>
#endif
#ifdef __cplusplus
extern "C" {
#endif
void AnnotateRWLockCreate(const char *, int,
const volatile void *){}
void AnnotateRWLockDestroy(const char *, int,
const volatile void *){}
void AnnotateRWLockAcquired(const char *, int,
const volatile void *, long){}
void AnnotateRWLockReleased(const char *, int,
const volatile void *, long){}
void AnnotateBenignRace(const char *, int,
const volatile void *,
const char *){}
void AnnotateBenignRaceSized(const char *, int,
const volatile void *,
size_t,
const char *) {}
void AnnotateThreadName(const char *, int,
const char *){}
void AnnotateIgnoreReadsBegin(const char *, int){}
void AnnotateIgnoreReadsEnd(const char *, int){}
void AnnotateIgnoreWritesBegin(const char *, int){}
void AnnotateIgnoreWritesEnd(const char *, int){}
void AnnotateEnableRaceDetection(const char *, int, int){}
void AnnotateMemoryIsInitialized(const char *, int,
const volatile void *mem, size_t size) {
#if __has_feature(memory_sanitizer)
__msan_unpoison(mem, size);
#else
(void)mem;
(void)size;
#endif
}
void AnnotateMemoryIsUninitialized(const char *, int,
const volatile void *mem, size_t size) {
#if __has_feature(memory_sanitizer)
__msan_allocated_memory(mem, size);
#else
(void)mem;
(void)size;
#endif
}
static int GetRunningOnValgrind(void) {
#ifdef RUNNING_ON_VALGRIND
if (RUNNING_ON_VALGRIND) return 1;
#endif
char *running_on_valgrind_str = getenv("RUNNING_ON_VALGRIND");
if (running_on_valgrind_str) {
return strcmp(running_on_valgrind_str, "0") != 0;
}
return 0;
}
/* See the comments in dynamic_annotations.h */
int RunningOnValgrind(void) {
static volatile int running_on_valgrind = -1;
int local_running_on_valgrind = running_on_valgrind;
/* C doesn't have thread-safe initialization of statics, and we
don't want to depend on pthread_once here, so hack it. */
ANNOTATE_BENIGN_RACE(&running_on_valgrind, "safe hack");
if (local_running_on_valgrind == -1)
running_on_valgrind = local_running_on_valgrind = GetRunningOnValgrind();
return local_running_on_valgrind;
}
/* See the comments in dynamic_annotations.h */
double ValgrindSlowdown(void) {
/* Same initialization hack as in RunningOnValgrind(). */
static volatile double slowdown = 0.0;
double local_slowdown = slowdown;
ANNOTATE_BENIGN_RACE(&slowdown, "safe hack");
if (RunningOnValgrind() == 0) {
return 1.0;
}
if (local_slowdown == 0.0) {
char *env = getenv("VALGRIND_SLOWDOWN");
slowdown = local_slowdown = env ? atof(env) : 50.0;
}
return local_slowdown;
}
#ifdef __cplusplus
} // extern "C"
#endif
#endif /* DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL == 0 */

View File

@@ -0,0 +1,389 @@
/*
* Copyright 2017 The Abseil Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* This file defines dynamic annotations for use with dynamic analysis
tools such as valgrind, PIN, etc.
Dynamic annotation is a source code annotation that affects
the generated code (that is, the annotation is not a comment).
Each such annotation is attached to a particular
instruction and/or to a particular object (address) in the program.
The annotations that should be used by users are macros in all upper-case
(e.g., ANNOTATE_THREAD_NAME).
Actual implementation of these macros may differ depending on the
dynamic analysis tool being used.
This file supports the following configurations:
- Dynamic Annotations enabled (with static thread-safety warnings disabled).
In this case, macros expand to functions implemented by Thread Sanitizer,
when building with TSan. When not provided an external implementation,
dynamic_annotations.cc provides no-op implementations.
- Static Clang thread-safety warnings enabled.
When building with a Clang compiler that supports thread-safety warnings,
a subset of annotations can be statically-checked at compile-time. We
expand these macros to static-inline functions that can be analyzed for
thread-safety, but afterwards elided when building the final binary.
- All annotations are disabled.
If neither Dynamic Annotations nor Clang thread-safety warnings are
enabled, then all annotation-macros expand to empty. */
#ifndef ABSL_BASE_DYNAMIC_ANNOTATIONS_H_
#define ABSL_BASE_DYNAMIC_ANNOTATIONS_H_
#ifndef DYNAMIC_ANNOTATIONS_ENABLED
# define DYNAMIC_ANNOTATIONS_ENABLED 0
#endif
#if DYNAMIC_ANNOTATIONS_ENABLED != 0
/* -------------------------------------------------------------
Annotations that suppress errors. It is usually better to express the
program's synchronization using the other annotations, but these can
be used when all else fails. */
/* Report that we may have a benign race at "pointer", with size
"sizeof(*(pointer))". "pointer" must be a non-void* pointer. Insert at the
point where "pointer" has been allocated, preferably close to the point
where the race happens. See also ANNOTATE_BENIGN_RACE_STATIC. */
#define ANNOTATE_BENIGN_RACE(pointer, description) \
AnnotateBenignRaceSized(__FILE__, __LINE__, pointer, \
sizeof(*(pointer)), description)
/* Same as ANNOTATE_BENIGN_RACE(address, description), but applies to
the memory range [address, address+size). */
#define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \
AnnotateBenignRaceSized(__FILE__, __LINE__, address, size, description)
/* Enable (enable!=0) or disable (enable==0) race detection for all threads.
This annotation could be useful if you want to skip expensive race analysis
during some period of program execution, e.g. during initialization. */
#define ANNOTATE_ENABLE_RACE_DETECTION(enable) \
AnnotateEnableRaceDetection(__FILE__, __LINE__, enable)
/* -------------------------------------------------------------
Annotations useful for debugging. */
/* Report the current thread name to a race detector. */
#define ANNOTATE_THREAD_NAME(name) \
AnnotateThreadName(__FILE__, __LINE__, name)
/* -------------------------------------------------------------
Annotations useful when implementing locks. They are not
normally needed by modules that merely use locks.
The "lock" argument is a pointer to the lock object. */
/* Report that a lock has been created at address "lock". */
#define ANNOTATE_RWLOCK_CREATE(lock) \
AnnotateRWLockCreate(__FILE__, __LINE__, lock)
/* Report that a linker-initialized lock has been created at address "lock".
*/
#ifdef THREAD_SANITIZER
#define ANNOTATE_RWLOCK_CREATE_STATIC(lock) \
AnnotateRWLockCreateStatic(__FILE__, __LINE__, lock)
#else
#define ANNOTATE_RWLOCK_CREATE_STATIC(lock) ANNOTATE_RWLOCK_CREATE(lock)
#endif
/* Report that the lock at address "lock" is about to be destroyed. */
#define ANNOTATE_RWLOCK_DESTROY(lock) \
AnnotateRWLockDestroy(__FILE__, __LINE__, lock)
/* Report that the lock at address "lock" has been acquired.
is_w=1 for writer lock, is_w=0 for reader lock. */
#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \
AnnotateRWLockAcquired(__FILE__, __LINE__, lock, is_w)
/* Report that the lock at address "lock" is about to be released. */
#define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \
AnnotateRWLockReleased(__FILE__, __LINE__, lock, is_w)
#else /* DYNAMIC_ANNOTATIONS_ENABLED == 0 */
#define ANNOTATE_RWLOCK_CREATE(lock) /* empty */
#define ANNOTATE_RWLOCK_CREATE_STATIC(lock) /* empty */
#define ANNOTATE_RWLOCK_DESTROY(lock) /* empty */
#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) /* empty */
#define ANNOTATE_RWLOCK_RELEASED(lock, is_w) /* empty */
#define ANNOTATE_BENIGN_RACE(address, description) /* empty */
#define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) /* empty */
#define ANNOTATE_THREAD_NAME(name) /* empty */
#define ANNOTATE_ENABLE_RACE_DETECTION(enable) /* empty */
#endif /* DYNAMIC_ANNOTATIONS_ENABLED */
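/* For example, a hand-rolled spinlock might report its life cycle to the race
   detector like this (illustrative sketch; MySpinLock, AcquireInternal and
   ReleaseInternal are hypothetical):
     MySpinLock::MySpinLock() { ANNOTATE_RWLOCK_CREATE(this); }
     MySpinLock::~MySpinLock() { ANNOTATE_RWLOCK_DESTROY(this); }
     void MySpinLock::Lock() {
       AcquireInternal();                  // hypothetical spin loop
       ANNOTATE_RWLOCK_ACQUIRED(this, 1);  // 1 == writer lock
     }
     void MySpinLock::Unlock() {
       ANNOTATE_RWLOCK_RELEASED(this, 1);
       ReleaseInternal();
     }
*/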
/* These annotations are also made available to LLVM's Memory Sanitizer */
#if DYNAMIC_ANNOTATIONS_ENABLED == 1 || defined(MEMORY_SANITIZER)
#define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \
AnnotateMemoryIsInitialized(__FILE__, __LINE__, address, size)
#define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \
AnnotateMemoryIsUninitialized(__FILE__, __LINE__, address, size)
#else
#define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) /* empty */
#define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) /* empty */
#endif /* DYNAMIC_ANNOTATIONS_ENABLED || MEMORY_SANITIZER */
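/* For example, a wrapper around a device read that the sanitizer cannot see
   into could mark its output buffer as initialized (illustrative sketch;
   ReadFromDevice, fd, buf and len are hypothetical):
     size_t n = ReadFromDevice(fd, buf, len);
     ANNOTATE_MEMORY_IS_INITIALIZED(buf, n);
*/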
/* TODO(delesley) -- Replace __CLANG_SUPPORT_DYN_ANNOTATION__ with the
appropriate feature ID. */
#if defined(__clang__) && (!defined(SWIG)) \
&& defined(__CLANG_SUPPORT_DYN_ANNOTATION__)
#if DYNAMIC_ANNOTATIONS_ENABLED == 0
#define ANNOTALYSIS_ENABLED
#endif
/* When running in opt mode, GCC will issue a warning if these attributes are
   compiled. Only include them when compiling with Clang. */
#define ATTRIBUTE_IGNORE_READS_BEGIN \
__attribute((exclusive_lock_function("*")))
#define ATTRIBUTE_IGNORE_READS_END \
__attribute((unlock_function("*")))
#else
#define ATTRIBUTE_IGNORE_READS_BEGIN /* empty */
#define ATTRIBUTE_IGNORE_READS_END /* empty */
#endif /* defined(__clang__) && ... */
#if (DYNAMIC_ANNOTATIONS_ENABLED != 0) || defined(ANNOTALYSIS_ENABLED)
#define ANNOTATIONS_ENABLED
#endif
#if (DYNAMIC_ANNOTATIONS_ENABLED != 0)
/* Request the analysis tool to ignore all reads in the current thread
until ANNOTATE_IGNORE_READS_END is called.
   Useful for ignoring intentional racey reads, while still checking
other reads and all writes.
See also ANNOTATE_UNPROTECTED_READ. */
#define ANNOTATE_IGNORE_READS_BEGIN() \
AnnotateIgnoreReadsBegin(__FILE__, __LINE__)
/* Stop ignoring reads. */
#define ANNOTATE_IGNORE_READS_END() \
AnnotateIgnoreReadsEnd(__FILE__, __LINE__)
/* Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore writes instead. */
#define ANNOTATE_IGNORE_WRITES_BEGIN() \
AnnotateIgnoreWritesBegin(__FILE__, __LINE__)
/* Stop ignoring writes. */
#define ANNOTATE_IGNORE_WRITES_END() \
AnnotateIgnoreWritesEnd(__FILE__, __LINE__)
/* Clang provides limited support for static thread-safety analysis
   through a feature called Annotalysis. We configure macro definitions
according to whether Annotalysis support is available. */
#elif defined(ANNOTALYSIS_ENABLED)
#define ANNOTATE_IGNORE_READS_BEGIN() \
StaticAnnotateIgnoreReadsBegin(__FILE__, __LINE__)
#define ANNOTATE_IGNORE_READS_END() \
StaticAnnotateIgnoreReadsEnd(__FILE__, __LINE__)
#define ANNOTATE_IGNORE_WRITES_BEGIN() \
StaticAnnotateIgnoreWritesBegin(__FILE__, __LINE__)
#define ANNOTATE_IGNORE_WRITES_END() \
StaticAnnotateIgnoreWritesEnd(__FILE__, __LINE__)
#else
#define ANNOTATE_IGNORE_READS_BEGIN() /* empty */
#define ANNOTATE_IGNORE_READS_END() /* empty */
#define ANNOTATE_IGNORE_WRITES_BEGIN() /* empty */
#define ANNOTATE_IGNORE_WRITES_END() /* empty */
#endif
/* Implement the ANNOTATE_IGNORE_READS_AND_WRITES_* annotations using the more
primitive annotations defined above. */
#if defined(ANNOTATIONS_ENABLED)
/* Start ignoring all memory accesses (both reads and writes). */
#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \
do { \
ANNOTATE_IGNORE_READS_BEGIN(); \
ANNOTATE_IGNORE_WRITES_BEGIN(); \
  } while (0)
/* Stop ignoring both reads and writes. */
#define ANNOTATE_IGNORE_READS_AND_WRITES_END() \
do { \
ANNOTATE_IGNORE_WRITES_END(); \
ANNOTATE_IGNORE_READS_END(); \
  } while (0)
#else
#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() /* empty */
#define ANNOTATE_IGNORE_READS_AND_WRITES_END() /* empty */
#endif
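/* For example, a debug-only dump that intentionally reads and writes shared
   state without locking could be bracketed like this (illustrative sketch;
   DumpStateUnlocked is hypothetical):
     ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
     DumpStateUnlocked();
     ANNOTATE_IGNORE_READS_AND_WRITES_END();
*/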
/* Use the macros above rather than using these functions directly. */
#include <stddef.h>
#ifdef __cplusplus
extern "C" {
#endif
void AnnotateRWLockCreate(const char *file, int line,
const volatile void *lock);
void AnnotateRWLockCreateStatic(const char *file, int line,
const volatile void *lock);
void AnnotateRWLockDestroy(const char *file, int line,
const volatile void *lock);
void AnnotateRWLockAcquired(const char *file, int line,
const volatile void *lock, long is_w); /* NOLINT */
void AnnotateRWLockReleased(const char *file, int line,
const volatile void *lock, long is_w); /* NOLINT */
void AnnotateBenignRace(const char *file, int line,
const volatile void *address,
const char *description);
void AnnotateBenignRaceSized(const char *file, int line,
const volatile void *address,
size_t size,
const char *description);
void AnnotateThreadName(const char *file, int line,
const char *name);
void AnnotateEnableRaceDetection(const char *file, int line, int enable);
void AnnotateMemoryIsInitialized(const char *file, int line,
const volatile void *mem, size_t size);
void AnnotateMemoryIsUninitialized(const char *file, int line,
const volatile void *mem, size_t size);
/* Annotations expand to these functions when Dynamic Annotations are enabled.
   These functions are implemented either as no-op calls, if no sanitizer is
   attached, or as externally linked implementations provided by a library
   like ThreadSanitizer. */
void AnnotateIgnoreReadsBegin(const char *file, int line)
ATTRIBUTE_IGNORE_READS_BEGIN;
void AnnotateIgnoreReadsEnd(const char *file, int line)
ATTRIBUTE_IGNORE_READS_END;
void AnnotateIgnoreWritesBegin(const char *file, int line);
void AnnotateIgnoreWritesEnd(const char *file, int line);
#if defined(ANNOTALYSIS_ENABLED)
/* When Annotalysis is enabled without Dynamic Annotations, the use of
static-inline functions allows the annotations to be read at compile-time,
while still letting the compiler elide the functions from the final build.
TODO(delesley) -- The exclusive lock here ignores writes as well, but
allows IGNORE_READS_AND_WRITES to work properly. */
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-function"
static inline void StaticAnnotateIgnoreReadsBegin(const char *file, int line)
ATTRIBUTE_IGNORE_READS_BEGIN { (void)file; (void)line; }
static inline void StaticAnnotateIgnoreReadsEnd(const char *file, int line)
ATTRIBUTE_IGNORE_READS_END { (void)file; (void)line; }
static inline void StaticAnnotateIgnoreWritesBegin(
const char *file, int line) { (void)file; (void)line; }
static inline void StaticAnnotateIgnoreWritesEnd(
const char *file, int line) { (void)file; (void)line; }
#pragma GCC diagnostic pop
#endif
/* Returns a non-zero value if running under valgrind.
If "valgrind.h" is included into dynamic_annotations.cc,
the regular valgrind mechanism will be used.
See http://valgrind.org/docs/manual/manual-core-adv.html about
RUNNING_ON_VALGRIND and other valgrind "client requests".
The file "valgrind.h" may be obtained by doing
svn co svn://svn.valgrind.org/valgrind/trunk/include
If for some reason you can't use "valgrind.h" or want to fake valgrind,
there are two ways to make this function return non-zero:
- Use environment variable: export RUNNING_ON_VALGRIND=1
- Make your tool intercept the function RunningOnValgrind() and
change its return value.
*/
int RunningOnValgrind(void);
/* ValgrindSlowdown returns:
* 1.0, if (RunningOnValgrind() == 0)
* 50.0, if (RunningOnValgrind() != 0 && getenv("VALGRIND_SLOWDOWN") == NULL)
* atof(getenv("VALGRIND_SLOWDOWN")) otherwise
This function can be used to scale timeout values:
EXAMPLE:
for (;;) {
DoExpensiveBackgroundTask();
SleepForSeconds(5 * ValgrindSlowdown());
}
*/
double ValgrindSlowdown(void);
#ifdef __cplusplus
}
#endif
/* ANNOTATE_UNPROTECTED_READ is the preferred way to annotate racey reads.
Instead of doing
ANNOTATE_IGNORE_READS_BEGIN();
... = x;
ANNOTATE_IGNORE_READS_END();
one can use
... = ANNOTATE_UNPROTECTED_READ(x); */
#if defined(__cplusplus) && defined(ANNOTATIONS_ENABLED)
template <typename T>
inline T ANNOTATE_UNPROTECTED_READ(const volatile T &x) { /* NOLINT */
ANNOTATE_IGNORE_READS_BEGIN();
T res = x;
ANNOTATE_IGNORE_READS_END();
return res;
}
#else
#define ANNOTATE_UNPROTECTED_READ(x) (x)
#endif
#if DYNAMIC_ANNOTATIONS_ENABLED != 0 && defined(__cplusplus)
/* Apply ANNOTATE_BENIGN_RACE_SIZED to a static variable. */
#define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) \
namespace { \
class static_var ## _annotator { \
public: \
static_var ## _annotator() { \
ANNOTATE_BENIGN_RACE_SIZED(&static_var, \
sizeof(static_var), \
# static_var ": " description); \
} \
}; \
static static_var ## _annotator the ## static_var ## _annotator;\
} // namespace
#else /* DYNAMIC_ANNOTATIONS_ENABLED == 0 */
#define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) /* empty */
#endif /* DYNAMIC_ANNOTATIONS_ENABLED */
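/* For example, a file-scope counter can be registered automatically at static
   initialization (illustrative sketch; g_hits is a hypothetical static):
     static int g_hits = 0;
     ANNOTATE_BENIGN_RACE_STATIC(g_hits, "approximate hit counter");
*/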
#ifdef ADDRESS_SANITIZER
/* Describe the current state of a contiguous container such as
 * std::vector or std::string. For more details see
 * sanitizer/common_interface_defs.h, which is provided by the compiler. */
#include <sanitizer/common_interface_defs.h>
#define ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) \
__sanitizer_annotate_contiguous_container(beg, end, old_mid, new_mid)
#define ADDRESS_SANITIZER_REDZONE(name) \
struct { char x[8] __attribute__ ((aligned (8))); } name
#else
#define ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid)
#define ADDRESS_SANITIZER_REDZONE(name) static_assert(true, "")
#endif // ADDRESS_SANITIZER
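/* For example, a fixed-capacity ring buffer might keep ASan informed as its
   live region changes (illustrative sketch; buf, capacity, old_size and
   new_size are hypothetical):
     ANNOTATE_CONTIGUOUS_CONTAINER(buf, buf + capacity,
                                   buf + old_size, buf + new_size);
*/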
/* Undefine the macros intended for use only in this file. */
#undef ANNOTALYSIS_ENABLED
#undef ANNOTATIONS_ENABLED
#undef ATTRIBUTE_IGNORE_READS_BEGIN
#undef ATTRIBUTE_IGNORE_READS_END
#endif /* ABSL_BASE_DYNAMIC_ANNOTATIONS_H_ */

View File

@@ -0,0 +1,954 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/base/internal/exception_safety_testing.h"
#include <cstddef>
#include <exception>
#include <iostream>
#include <list>
#include <type_traits>
#include <vector>
#include "gtest/gtest-spi.h"
#include "gtest/gtest.h"
#include "absl/memory/memory.h"
namespace testing {
namespace {
using ::testing::exceptions_internal::SetCountdown;
using ::testing::exceptions_internal::TestException;
using ::testing::exceptions_internal::UnsetCountdown;
// EXPECT_NO_THROW can't inspect the thrown exception in general.
template <typename F>
void ExpectNoThrow(const F& f) {
try {
f();
} catch (const TestException& e) {
ADD_FAILURE() << "Unexpected exception thrown from " << e.what();
}
}
TEST(ThrowingValueTest, Throws) {
SetCountdown();
EXPECT_THROW(ThrowingValue<> bomb, TestException);
  // It's not guaranteed that every operator only throws *once*. The default
  // ctor only throws once, though, so use it to make sure we only throw when
  // the countdown hits 0.
SetCountdown(2);
ExpectNoThrow([]() { ThrowingValue<> bomb; });
ExpectNoThrow([]() { ThrowingValue<> bomb; });
EXPECT_THROW(ThrowingValue<> bomb, TestException);
UnsetCountdown();
}
// Tests that an operation throws when the countdown is at 0, doesn't throw when
// the countdown doesn't hit 0, and doesn't modify the state of the
// ThrowingValue if it throws.
template <typename F>
void TestOp(const F& f) {
ExpectNoThrow(f);
SetCountdown();
EXPECT_THROW(f(), TestException);
UnsetCountdown();
}
TEST(ThrowingValueTest, ThrowingCtors) {
ThrowingValue<> bomb;
TestOp([]() { ThrowingValue<> bomb(1); });
TestOp([&]() { ThrowingValue<> bomb1 = bomb; });
TestOp([&]() { ThrowingValue<> bomb1 = std::move(bomb); });
}
TEST(ThrowingValueTest, ThrowingAssignment) {
ThrowingValue<> bomb, bomb1;
TestOp([&]() { bomb = bomb1; });
TestOp([&]() { bomb = std::move(bomb1); });
  // Test that when assignment throws, the assignment fails (lhs != rhs) and
  // the strong guarantee fails (lhs != lhs_copy).
{
ThrowingValue<> lhs(39), rhs(42);
ThrowingValue<> lhs_copy(lhs);
SetCountdown();
EXPECT_THROW(lhs = rhs, TestException);
UnsetCountdown();
EXPECT_NE(lhs, rhs);
EXPECT_NE(lhs_copy, lhs);
}
{
ThrowingValue<> lhs(39), rhs(42);
ThrowingValue<> lhs_copy(lhs), rhs_copy(rhs);
SetCountdown();
EXPECT_THROW(lhs = std::move(rhs), TestException);
UnsetCountdown();
EXPECT_NE(lhs, rhs_copy);
EXPECT_NE(lhs_copy, lhs);
}
}
TEST(ThrowingValueTest, ThrowingComparisons) {
ThrowingValue<> bomb1, bomb2;
TestOp([&]() { return bomb1 == bomb2; });
TestOp([&]() { return bomb1 != bomb2; });
TestOp([&]() { return bomb1 < bomb2; });
TestOp([&]() { return bomb1 <= bomb2; });
TestOp([&]() { return bomb1 > bomb2; });
TestOp([&]() { return bomb1 >= bomb2; });
}
TEST(ThrowingValueTest, ThrowingArithmeticOps) {
ThrowingValue<> bomb1(1), bomb2(2);
TestOp([&bomb1]() { +bomb1; });
TestOp([&bomb1]() { -bomb1; });
TestOp([&bomb1]() { ++bomb1; });
TestOp([&bomb1]() { bomb1++; });
TestOp([&bomb1]() { --bomb1; });
TestOp([&bomb1]() { bomb1--; });
TestOp([&]() { bomb1 + bomb2; });
TestOp([&]() { bomb1 - bomb2; });
  TestOp([&]() { bomb1 * bomb2; });
TestOp([&]() { bomb1 / bomb2; });
TestOp([&]() { bomb1 << 1; });
TestOp([&]() { bomb1 >> 1; });
}
TEST(ThrowingValueTest, ThrowingLogicalOps) {
ThrowingValue<> bomb1, bomb2;
TestOp([&bomb1]() { !bomb1; });
TestOp([&]() { bomb1&& bomb2; });
TestOp([&]() { bomb1 || bomb2; });
}
TEST(ThrowingValueTest, ThrowingBitwiseOps) {
ThrowingValue<> bomb1, bomb2;
TestOp([&bomb1]() { ~bomb1; });
  TestOp([&]() { bomb1 & bomb2; });
TestOp([&]() { bomb1 | bomb2; });
TestOp([&]() { bomb1 ^ bomb2; });
}
TEST(ThrowingValueTest, ThrowingCompoundAssignmentOps) {
ThrowingValue<> bomb1(1), bomb2(2);
TestOp([&]() { bomb1 += bomb2; });
TestOp([&]() { bomb1 -= bomb2; });
TestOp([&]() { bomb1 *= bomb2; });
TestOp([&]() { bomb1 /= bomb2; });
TestOp([&]() { bomb1 %= bomb2; });
TestOp([&]() { bomb1 &= bomb2; });
TestOp([&]() { bomb1 |= bomb2; });
TestOp([&]() { bomb1 ^= bomb2; });
TestOp([&]() { bomb1 *= bomb2; });
}
TEST(ThrowingValueTest, ThrowingStreamOps) {
ThrowingValue<> bomb;
TestOp([&]() {
std::istringstream stream;
stream >> bomb;
});
TestOp([&]() {
std::stringstream stream;
stream << bomb;
});
}
// Tests the operator<< of ThrowingValue by forcing ConstructorTracker to emit
// a nonfatal failure that contains the string representation of the Thrower.
TEST(ThrowingValueTest, StreamOpsOutput) {
using ::testing::TypeSpec;
exceptions_internal::ConstructorTracker ct(exceptions_internal::countdown);
// Test default spec list (kEverythingThrows)
EXPECT_NONFATAL_FAILURE(
{
using Thrower = ThrowingValue<TypeSpec{}>;
auto thrower = Thrower(123);
thrower.~Thrower();
},
"ThrowingValue<>(123)");
// Test with one item in spec list (kNoThrowCopy)
EXPECT_NONFATAL_FAILURE(
{
using Thrower = ThrowingValue<TypeSpec::kNoThrowCopy>;
auto thrower = Thrower(234);
thrower.~Thrower();
},
"ThrowingValue<kNoThrowCopy>(234)");
// Test with multiple items in spec list (kNoThrowMove, kNoThrowNew)
EXPECT_NONFATAL_FAILURE(
{
using Thrower =
ThrowingValue<TypeSpec::kNoThrowMove | TypeSpec::kNoThrowNew>;
auto thrower = Thrower(345);
thrower.~Thrower();
},
"ThrowingValue<kNoThrowMove | kNoThrowNew>(345)");
// Test with all items in spec list (kNoThrowCopy, kNoThrowMove, kNoThrowNew)
EXPECT_NONFATAL_FAILURE(
{
using Thrower = ThrowingValue<static_cast<TypeSpec>(-1)>;
auto thrower = Thrower(456);
thrower.~Thrower();
},
"ThrowingValue<kNoThrowCopy | kNoThrowMove | kNoThrowNew>(456)");
}
template <typename F>
void TestAllocatingOp(const F& f) {
ExpectNoThrow(f);
SetCountdown();
EXPECT_THROW(f(), exceptions_internal::TestBadAllocException);
UnsetCountdown();
}
TEST(ThrowingValueTest, ThrowingAllocatingOps) {
// make_unique calls unqualified operator new, so these exercise the
// ThrowingValue overloads.
TestAllocatingOp([]() { return absl::make_unique<ThrowingValue<>>(1); });
TestAllocatingOp([]() { return absl::make_unique<ThrowingValue<>[]>(2); });
}
TEST(ThrowingValueTest, NonThrowingMoveCtor) {
ThrowingValue<TypeSpec::kNoThrowMove> nothrow_ctor;
SetCountdown();
ExpectNoThrow([&nothrow_ctor]() {
ThrowingValue<TypeSpec::kNoThrowMove> nothrow1 = std::move(nothrow_ctor);
});
UnsetCountdown();
}
TEST(ThrowingValueTest, NonThrowingMoveAssign) {
ThrowingValue<TypeSpec::kNoThrowMove> nothrow_assign1, nothrow_assign2;
SetCountdown();
ExpectNoThrow([&nothrow_assign1, &nothrow_assign2]() {
nothrow_assign1 = std::move(nothrow_assign2);
});
UnsetCountdown();
}
TEST(ThrowingValueTest, ThrowingCopyCtor) {
ThrowingValue<> tv;
TestOp([&]() { ThrowingValue<> tv_copy(tv); });
}
TEST(ThrowingValueTest, ThrowingCopyAssign) {
ThrowingValue<> tv1, tv2;
TestOp([&]() { tv1 = tv2; });
}
TEST(ThrowingValueTest, NonThrowingCopyCtor) {
ThrowingValue<TypeSpec::kNoThrowCopy> nothrow_ctor;
SetCountdown();
ExpectNoThrow([&nothrow_ctor]() {
ThrowingValue<TypeSpec::kNoThrowCopy> nothrow1(nothrow_ctor);
});
UnsetCountdown();
}
TEST(ThrowingValueTest, NonThrowingCopyAssign) {
ThrowingValue<TypeSpec::kNoThrowCopy> nothrow_assign1, nothrow_assign2;
SetCountdown();
ExpectNoThrow([&nothrow_assign1, &nothrow_assign2]() {
nothrow_assign1 = nothrow_assign2;
});
UnsetCountdown();
}
TEST(ThrowingValueTest, ThrowingSwap) {
ThrowingValue<> bomb1, bomb2;
TestOp([&]() { std::swap(bomb1, bomb2); });
}
TEST(ThrowingValueTest, NonThrowingSwap) {
ThrowingValue<TypeSpec::kNoThrowMove> bomb1, bomb2;
ExpectNoThrow([&]() { std::swap(bomb1, bomb2); });
}
TEST(ThrowingValueTest, NonThrowingAllocation) {
ThrowingValue<TypeSpec::kNoThrowNew>* allocated;
ThrowingValue<TypeSpec::kNoThrowNew>* array;
ExpectNoThrow([&allocated]() {
allocated = new ThrowingValue<TypeSpec::kNoThrowNew>(1);
delete allocated;
});
ExpectNoThrow([&array]() {
array = new ThrowingValue<TypeSpec::kNoThrowNew>[2];
delete[] array;
});
}
TEST(ThrowingValueTest, NonThrowingDelete) {
auto* allocated = new ThrowingValue<>(1);
auto* array = new ThrowingValue<>[2];
SetCountdown();
ExpectNoThrow([allocated]() { delete allocated; });
SetCountdown();
ExpectNoThrow([array]() { delete[] array; });
UnsetCountdown();
}
using Storage =
absl::aligned_storage_t<sizeof(ThrowingValue<>), alignof(ThrowingValue<>)>;
TEST(ThrowingValueTest, NonThrowingPlacementDelete) {
constexpr int kArrayLen = 2;
// We intentionally create extra space to store the tag allocated by placement
// new[].
constexpr int kStorageLen = 4;
Storage buf;
Storage array_buf[kStorageLen];
auto* placed = new (&buf) ThrowingValue<>(1);
auto placed_array = new (&array_buf) ThrowingValue<>[kArrayLen];
SetCountdown();
ExpectNoThrow([placed, &buf]() {
placed->~ThrowingValue<>();
ThrowingValue<>::operator delete(placed, &buf);
});
SetCountdown();
ExpectNoThrow([&, placed_array]() {
for (int i = 0; i < kArrayLen; ++i) placed_array[i].~ThrowingValue<>();
ThrowingValue<>::operator delete[](placed_array, &array_buf);
});
UnsetCountdown();
}
TEST(ThrowingValueTest, NonThrowingDestructor) {
auto* allocated = new ThrowingValue<>();
SetCountdown();
ExpectNoThrow([allocated]() { delete allocated; });
UnsetCountdown();
}
TEST(ThrowingBoolTest, ThrowingBool) {
ThrowingBool t = true;
// Test that it's contextually convertible to bool
if (t) { // NOLINT(whitespace/empty_if_body)
}
EXPECT_TRUE(t);
TestOp([&]() { (void)!t; });
}
TEST(ThrowingAllocatorTest, MemoryManagement) {
// Just exercise the memory management capabilities under LSan to make sure we
// don't leak.
ThrowingAllocator<int> int_alloc;
int* ip = int_alloc.allocate(1);
int_alloc.deallocate(ip, 1);
int* i_array = int_alloc.allocate(2);
int_alloc.deallocate(i_array, 2);
ThrowingAllocator<ThrowingValue<>> tv_alloc;
ThrowingValue<>* ptr = tv_alloc.allocate(1);
tv_alloc.deallocate(ptr, 1);
ThrowingValue<>* tv_array = tv_alloc.allocate(2);
tv_alloc.deallocate(tv_array, 2);
}
TEST(ThrowingAllocatorTest, CallsGlobalNew) {
ThrowingAllocator<ThrowingValue<>, AllocSpec::kNoThrowAllocate> nothrow_alloc;
ThrowingValue<>* ptr;
SetCountdown();
  // This will only throw if ThrowingValue::operator new is called.
ExpectNoThrow([&]() { ptr = nothrow_alloc.allocate(1); });
nothrow_alloc.deallocate(ptr, 1);
UnsetCountdown();
}
TEST(ThrowingAllocatorTest, ThrowingConstructors) {
ThrowingAllocator<int> int_alloc;
int* ip = nullptr;
SetCountdown();
EXPECT_THROW(ip = int_alloc.allocate(1), TestException);
ExpectNoThrow([&]() { ip = int_alloc.allocate(1); });
*ip = 1;
SetCountdown();
EXPECT_THROW(int_alloc.construct(ip, 2), TestException);
EXPECT_EQ(*ip, 1);
int_alloc.deallocate(ip, 1);
UnsetCountdown();
}
TEST(ThrowingAllocatorTest, NonThrowingConstruction) {
{
ThrowingAllocator<int, AllocSpec::kNoThrowAllocate> int_alloc;
int* ip = nullptr;
SetCountdown();
ExpectNoThrow([&]() { ip = int_alloc.allocate(1); });
SetCountdown();
ExpectNoThrow([&]() { int_alloc.construct(ip, 2); });
EXPECT_EQ(*ip, 2);
int_alloc.deallocate(ip, 1);
UnsetCountdown();
}
{
ThrowingAllocator<int> int_alloc;
int* ip = nullptr;
ExpectNoThrow([&]() { ip = int_alloc.allocate(1); });
ExpectNoThrow([&]() { int_alloc.construct(ip, 2); });
EXPECT_EQ(*ip, 2);
int_alloc.deallocate(ip, 1);
}
{
ThrowingAllocator<ThrowingValue<>, AllocSpec::kNoThrowAllocate>
nothrow_alloc;
ThrowingValue<>* ptr;
SetCountdown();
ExpectNoThrow([&]() { ptr = nothrow_alloc.allocate(1); });
SetCountdown();
ExpectNoThrow(
[&]() { nothrow_alloc.construct(ptr, 2, testing::nothrow_ctor); });
EXPECT_EQ(ptr->Get(), 2);
nothrow_alloc.destroy(ptr);
nothrow_alloc.deallocate(ptr, 1);
UnsetCountdown();
}
{
ThrowingAllocator<int> a;
SetCountdown();
ExpectNoThrow([&]() { ThrowingAllocator<double> a1 = a; });
SetCountdown();
ExpectNoThrow([&]() { ThrowingAllocator<double> a1 = std::move(a); });
UnsetCountdown();
}
}
TEST(ThrowingAllocatorTest, ThrowingAllocatorConstruction) {
ThrowingAllocator<int> a;
TestOp([]() { ThrowingAllocator<int> a; });
TestOp([&]() { a.select_on_container_copy_construction(); });
}
TEST(ThrowingAllocatorTest, State) {
ThrowingAllocator<int> a1, a2;
EXPECT_NE(a1, a2);
auto a3 = a1;
EXPECT_EQ(a3, a1);
int* ip = a1.allocate(1);
EXPECT_EQ(a3, a1);
a3.deallocate(ip, 1);
EXPECT_EQ(a3, a1);
}
TEST(ThrowingAllocatorTest, InVector) {
std::vector<ThrowingValue<>, ThrowingAllocator<ThrowingValue<>>> v;
for (int i = 0; i < 20; ++i) v.push_back({});
for (int i = 0; i < 20; ++i) v.pop_back();
}
TEST(ThrowingAllocatorTest, InList) {
std::list<ThrowingValue<>, ThrowingAllocator<ThrowingValue<>>> l;
for (int i = 0; i < 20; ++i) l.push_back({});
for (int i = 0; i < 20; ++i) l.pop_back();
for (int i = 0; i < 20; ++i) l.push_front({});
for (int i = 0; i < 20; ++i) l.pop_front();
}
template <typename TesterInstance, typename = void>
struct NullaryTestValidator : public std::false_type {};
template <typename TesterInstance>
struct NullaryTestValidator<
TesterInstance,
absl::void_t<decltype(std::declval<TesterInstance>().Test())>>
: public std::true_type {};
template <typename TesterInstance>
bool HasNullaryTest(const TesterInstance&) {
return NullaryTestValidator<TesterInstance>::value;
}
void DummyOp(void*) {}
template <typename TesterInstance, typename = void>
struct UnaryTestValidator : public std::false_type {};
template <typename TesterInstance>
struct UnaryTestValidator<
TesterInstance,
absl::void_t<decltype(std::declval<TesterInstance>().Test(DummyOp))>>
: public std::true_type {};
template <typename TesterInstance>
bool HasUnaryTest(const TesterInstance&) {
return UnaryTestValidator<TesterInstance>::value;
}
TEST(ExceptionSafetyTesterTest, IncompleteTypesAreNotTestable) {
using T = exceptions_internal::UninitializedT;
auto op = [](T* t) {};
auto inv = [](T*) { return testing::AssertionSuccess(); };
auto fac = []() { return absl::make_unique<T>(); };
  // Test that providing an operation and invariants still does not allow the
  // invocation of .Test() or .Test(op) because it lacks a factory.
auto without_fac =
testing::MakeExceptionSafetyTester().WithOperation(op).WithContracts(
inv, testing::strong_guarantee);
EXPECT_FALSE(HasNullaryTest(without_fac));
EXPECT_FALSE(HasUnaryTest(without_fac));
  // Test that providing contracts and a factory allows the invocation of
  // .Test(op) but not .Test() because it lacks an operation.
auto without_op = testing::MakeExceptionSafetyTester()
.WithContracts(inv, testing::strong_guarantee)
.WithFactory(fac);
EXPECT_FALSE(HasNullaryTest(without_op));
EXPECT_TRUE(HasUnaryTest(without_op));
  // Test that providing an operation and a factory still does not allow the
  // invocation of .Test() or .Test(op) because it lacks contracts.
auto without_inv =
testing::MakeExceptionSafetyTester().WithOperation(op).WithFactory(fac);
EXPECT_FALSE(HasNullaryTest(without_inv));
EXPECT_FALSE(HasUnaryTest(without_inv));
}
struct ExampleStruct {};
std::unique_ptr<ExampleStruct> ExampleFunctionFactory() {
return absl::make_unique<ExampleStruct>();
}
void ExampleFunctionOperation(ExampleStruct*) {}
testing::AssertionResult ExampleFunctionContract(ExampleStruct*) {
return testing::AssertionSuccess();
}
struct {
std::unique_ptr<ExampleStruct> operator()() const {
return ExampleFunctionFactory();
}
} example_struct_factory;
struct {
void operator()(ExampleStruct*) const {}
} example_struct_operation;
struct {
testing::AssertionResult operator()(ExampleStruct* example_struct) const {
return ExampleFunctionContract(example_struct);
}
} example_struct_contract;
auto example_lambda_factory = []() { return ExampleFunctionFactory(); };
auto example_lambda_operation = [](ExampleStruct*) {};
auto example_lambda_contract = [](ExampleStruct* example_struct) {
return ExampleFunctionContract(example_struct);
};
// Testing that function references, pointers, structs with operator() and
// lambdas can all be used with ExceptionSafetyTester
TEST(ExceptionSafetyTesterTest, MixedFunctionTypes) {
// function reference
EXPECT_TRUE(testing::MakeExceptionSafetyTester()
.WithFactory(ExampleFunctionFactory)
.WithOperation(ExampleFunctionOperation)
.WithContracts(ExampleFunctionContract)
.Test());
// function pointer
EXPECT_TRUE(testing::MakeExceptionSafetyTester()
.WithFactory(&ExampleFunctionFactory)
.WithOperation(&ExampleFunctionOperation)
.WithContracts(&ExampleFunctionContract)
.Test());
// struct
EXPECT_TRUE(testing::MakeExceptionSafetyTester()
.WithFactory(example_struct_factory)
.WithOperation(example_struct_operation)
.WithContracts(example_struct_contract)
.Test());
// lambda
EXPECT_TRUE(testing::MakeExceptionSafetyTester()
.WithFactory(example_lambda_factory)
.WithOperation(example_lambda_operation)
.WithContracts(example_lambda_contract)
.Test());
}
struct NonNegative {
bool operator==(const NonNegative& other) const { return i == other.i; }
int i;
};
testing::AssertionResult CheckNonNegativeInvariants(NonNegative* g) {
if (g->i >= 0) {
return testing::AssertionSuccess();
}
return testing::AssertionFailure()
<< "i should be non-negative but is " << g->i;
}
struct {
template <typename T>
void operator()(T* t) const {
(*t)();
}
} invoker;
auto tester =
testing::MakeExceptionSafetyTester().WithOperation(invoker).WithContracts(
CheckNonNegativeInvariants);
auto strong_tester = tester.WithContracts(testing::strong_guarantee);
struct FailsBasicGuarantee : public NonNegative {
void operator()() {
--i;
ThrowingValue<> bomb;
++i;
}
};
TEST(ExceptionCheckTest, BasicGuaranteeFailure) {
EXPECT_FALSE(tester.WithInitialValue(FailsBasicGuarantee{}).Test());
}
struct FollowsBasicGuarantee : public NonNegative {
void operator()() {
++i;
ThrowingValue<> bomb;
}
};
TEST(ExceptionCheckTest, BasicGuarantee) {
EXPECT_TRUE(tester.WithInitialValue(FollowsBasicGuarantee{}).Test());
}
TEST(ExceptionCheckTest, StrongGuaranteeFailure) {
EXPECT_FALSE(strong_tester.WithInitialValue(FailsBasicGuarantee{}).Test());
EXPECT_FALSE(strong_tester.WithInitialValue(FollowsBasicGuarantee{}).Test());
}
struct BasicGuaranteeWithExtraContracts : public NonNegative {
  // After operator(), i is incremented. If operator() throws, i is set to 9999.
void operator()() {
int old_i = i;
i = kExceptionSentinel;
ThrowingValue<> bomb;
i = ++old_i;
}
static constexpr int kExceptionSentinel = 9999;
};
constexpr int BasicGuaranteeWithExtraContracts::kExceptionSentinel;
TEST(ExceptionCheckTest, BasicGuaranteeWithExtraContracts) {
auto tester_with_val =
tester.WithInitialValue(BasicGuaranteeWithExtraContracts{});
EXPECT_TRUE(tester_with_val.Test());
EXPECT_TRUE(
tester_with_val
.WithContracts([](BasicGuaranteeWithExtraContracts* o) {
if (o->i == BasicGuaranteeWithExtraContracts::kExceptionSentinel) {
return testing::AssertionSuccess();
}
return testing::AssertionFailure()
<< "i should be "
<< BasicGuaranteeWithExtraContracts::kExceptionSentinel
<< ", but is " << o->i;
})
.Test());
}
struct FollowsStrongGuarantee : public NonNegative {
void operator()() { ThrowingValue<> bomb; }
};
TEST(ExceptionCheckTest, StrongGuarantee) {
EXPECT_TRUE(tester.WithInitialValue(FollowsStrongGuarantee{}).Test());
EXPECT_TRUE(strong_tester.WithInitialValue(FollowsStrongGuarantee{}).Test());
}
struct HasReset : public NonNegative {
void operator()() {
i = -1;
ThrowingValue<> bomb;
i = 1;
}
void reset() { i = 0; }
};
testing::AssertionResult CheckHasResetContracts(HasReset* h) {
h->reset();
return testing::AssertionResult(h->i == 0);
}
TEST(ExceptionCheckTest, ModifyingChecker) {
auto set_to_1000 = [](FollowsBasicGuarantee* g) {
g->i = 1000;
return testing::AssertionSuccess();
};
auto is_1000 = [](FollowsBasicGuarantee* g) {
return testing::AssertionResult(g->i == 1000);
};
auto increment = [](FollowsStrongGuarantee* g) {
++g->i;
return testing::AssertionSuccess();
};
EXPECT_FALSE(tester.WithInitialValue(FollowsBasicGuarantee{})
.WithContracts(set_to_1000, is_1000)
.Test());
EXPECT_TRUE(strong_tester.WithInitialValue(FollowsStrongGuarantee{})
.WithContracts(increment)
.Test());
EXPECT_TRUE(testing::MakeExceptionSafetyTester()
.WithInitialValue(HasReset{})
.WithContracts(CheckHasResetContracts)
.Test(invoker));
}
TEST(ExceptionSafetyTesterTest, ResetsCountdown) {
auto test =
testing::MakeExceptionSafetyTester()
.WithInitialValue(ThrowingValue<>())
.WithContracts([](ThrowingValue<>*) { return AssertionSuccess(); })
.WithOperation([](ThrowingValue<>*) {});
ASSERT_TRUE(test.Test());
  // If the countdown isn't reset because no exceptions were thrown, then this
  // will fail with a termination from an unhandled exception.
EXPECT_TRUE(test.Test());
}
struct NonCopyable : public NonNegative {
NonCopyable(const NonCopyable&) = delete;
NonCopyable() : NonNegative{0} {}
void operator()() { ThrowingValue<> bomb; }
};
TEST(ExceptionCheckTest, NonCopyable) {
auto factory = []() { return absl::make_unique<NonCopyable>(); };
EXPECT_TRUE(tester.WithFactory(factory).Test());
EXPECT_TRUE(strong_tester.WithFactory(factory).Test());
}
struct NonEqualityComparable : public NonNegative {
void operator()() { ThrowingValue<> bomb; }
void ModifyOnThrow() {
++i;
ThrowingValue<> bomb;
static_cast<void>(bomb);
--i;
}
};
TEST(ExceptionCheckTest, NonEqualityComparable) {
auto nec_is_strong = [](NonEqualityComparable* nec) {
return testing::AssertionResult(nec->i == NonEqualityComparable().i);
};
auto strong_nec_tester = tester.WithInitialValue(NonEqualityComparable{})
.WithContracts(nec_is_strong);
EXPECT_TRUE(strong_nec_tester.Test());
EXPECT_FALSE(strong_nec_tester.Test(
[](NonEqualityComparable* n) { n->ModifyOnThrow(); }));
}
template <typename T>
struct ExhaustivenessTester {
void operator()() {
successes |= 1;
T b1;
static_cast<void>(b1);
successes |= (1 << 1);
T b2;
static_cast<void>(b2);
successes |= (1 << 2);
T b3;
static_cast<void>(b3);
successes |= (1 << 3);
}
bool operator==(const ExhaustivenessTester<ThrowingValue<>>&) const {
return true;
}
static unsigned char successes;
};
struct {
template <typename T>
testing::AssertionResult operator()(ExhaustivenessTester<T>*) const {
return testing::AssertionSuccess();
}
} CheckExhaustivenessTesterContracts;
template <typename T>
unsigned char ExhaustivenessTester<T>::successes = 0;
TEST(ExceptionCheckTest, Exhaustiveness) {
auto exhaust_tester = testing::MakeExceptionSafetyTester()
.WithContracts(CheckExhaustivenessTesterContracts)
.WithOperation(invoker);
EXPECT_TRUE(
exhaust_tester.WithInitialValue(ExhaustivenessTester<int>{}).Test());
EXPECT_EQ(ExhaustivenessTester<int>::successes, 0xF);
EXPECT_TRUE(
exhaust_tester.WithInitialValue(ExhaustivenessTester<ThrowingValue<>>{})
.WithContracts(testing::strong_guarantee)
.Test());
EXPECT_EQ(ExhaustivenessTester<ThrowingValue<>>::successes, 0xF);
}
struct LeaksIfCtorThrows : private exceptions_internal::TrackedObject {
LeaksIfCtorThrows() : TrackedObject(ABSL_PRETTY_FUNCTION) {
++counter;
ThrowingValue<> v;
static_cast<void>(v);
--counter;
}
LeaksIfCtorThrows(const LeaksIfCtorThrows&) noexcept
: TrackedObject(ABSL_PRETTY_FUNCTION) {}
static int counter;
};
int LeaksIfCtorThrows::counter = 0;
TEST(ExceptionCheckTest, TestLeakyCtor) {
testing::TestThrowingCtor<LeaksIfCtorThrows>();
EXPECT_EQ(LeaksIfCtorThrows::counter, 1);
LeaksIfCtorThrows::counter = 0;
}
struct Tracked : private exceptions_internal::TrackedObject {
Tracked() : TrackedObject(ABSL_PRETTY_FUNCTION) {}
};
TEST(ConstructorTrackerTest, CreatedBefore) {
Tracked a, b, c;
exceptions_internal::ConstructorTracker ct(exceptions_internal::countdown);
}
TEST(ConstructorTrackerTest, CreatedAfter) {
exceptions_internal::ConstructorTracker ct(exceptions_internal::countdown);
Tracked a, b, c;
}
TEST(ConstructorTrackerTest, NotDestroyedAfter) {
absl::aligned_storage_t<sizeof(Tracked), alignof(Tracked)> storage;
EXPECT_NONFATAL_FAILURE(
{
exceptions_internal::ConstructorTracker ct(
exceptions_internal::countdown);
new (&storage) Tracked;
},
"not destroyed");
}
TEST(ConstructorTrackerTest, DestroyedTwice) {
exceptions_internal::ConstructorTracker ct(exceptions_internal::countdown);
EXPECT_NONFATAL_FAILURE(
{
Tracked t;
t.~Tracked();
},
"re-destroyed");
}
TEST(ConstructorTrackerTest, ConstructedTwice) {
exceptions_internal::ConstructorTracker ct(exceptions_internal::countdown);
absl::aligned_storage_t<sizeof(Tracked), alignof(Tracked)> storage;
EXPECT_NONFATAL_FAILURE(
{
new (&storage) Tracked;
new (&storage) Tracked;
reinterpret_cast<Tracked*>(&storage)->~Tracked();
},
"re-constructed");
}
TEST(ThrowingValueTraitsTest, RelationalOperators) {
ThrowingValue<> a, b;
EXPECT_TRUE((std::is_convertible<decltype(a == b), bool>::value));
EXPECT_TRUE((std::is_convertible<decltype(a != b), bool>::value));
EXPECT_TRUE((std::is_convertible<decltype(a < b), bool>::value));
EXPECT_TRUE((std::is_convertible<decltype(a <= b), bool>::value));
EXPECT_TRUE((std::is_convertible<decltype(a > b), bool>::value));
EXPECT_TRUE((std::is_convertible<decltype(a >= b), bool>::value));
}
TEST(ThrowingAllocatorTraitsTest, Assignability) {
EXPECT_TRUE(absl::is_move_assignable<ThrowingAllocator<int>>::value);
EXPECT_TRUE(absl::is_copy_assignable<ThrowingAllocator<int>>::value);
EXPECT_TRUE(std::is_nothrow_move_assignable<ThrowingAllocator<int>>::value);
EXPECT_TRUE(std::is_nothrow_copy_assignable<ThrowingAllocator<int>>::value);
}
} // namespace
} // namespace testing

View File

@@ -0,0 +1,64 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <type_traits>
#include "absl/base/internal/inline_variable.h"
#include "absl/base/internal/inline_variable_testing.h"
#include "gtest/gtest.h"
namespace absl {
inline namespace lts_2019_08_08 {
namespace inline_variable_testing_internal {
namespace {
TEST(InlineVariableTest, Constexpr) {
static_assert(inline_variable_foo.value == 5, "");
static_assert(other_inline_variable_foo.value == 5, "");
static_assert(inline_variable_int == 5, "");
static_assert(other_inline_variable_int == 5, "");
}
TEST(InlineVariableTest, DefaultConstructedIdentityEquality) {
EXPECT_EQ(get_foo_a().value, 5);
EXPECT_EQ(get_foo_b().value, 5);
EXPECT_EQ(&get_foo_a(), &get_foo_b());
}
TEST(InlineVariableTest, DefaultConstructedIdentityInequality) {
EXPECT_NE(&inline_variable_foo, &other_inline_variable_foo);
}
TEST(InlineVariableTest, InitializedIdentityEquality) {
EXPECT_EQ(get_int_a(), 5);
EXPECT_EQ(get_int_b(), 5);
EXPECT_EQ(&get_int_a(), &get_int_b());
}
TEST(InlineVariableTest, InitializedIdentityInequality) {
EXPECT_NE(&inline_variable_int, &other_inline_variable_int);
}
TEST(InlineVariableTest, FunPtrType) {
static_assert(
std::is_same<void(*)(),
std::decay<decltype(inline_variable_fun_ptr)>::type>::value,
"");
}
} // namespace
} // namespace inline_variable_testing_internal
} // inline namespace lts_2019_08_08
} // namespace absl

View File

@@ -0,0 +1,27 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/base/internal/inline_variable_testing.h"
namespace absl {
inline namespace lts_2019_08_08 {
namespace inline_variable_testing_internal {
const Foo& get_foo_a() { return inline_variable_foo; }
const int& get_int_a() { return inline_variable_int; }
} // namespace inline_variable_testing_internal
} // inline namespace lts_2019_08_08
} // namespace absl

View File

@@ -0,0 +1,27 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/base/internal/inline_variable_testing.h"
namespace absl {
inline namespace lts_2019_08_08 {
namespace inline_variable_testing_internal {
const Foo& get_foo_b() { return inline_variable_foo; }
const int& get_int_b() { return inline_variable_int; }
} // namespace inline_variable_testing_internal
} // inline namespace lts_2019_08_08
} // namespace absl

View File

@@ -0,0 +1,167 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_
#define ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_
#include <atomic>
#include <cassert>
#include <cstdint>
#include <utility>
#ifdef _MSC_FULL_VER
#define ABSL_HAVE_WORKING_ATOMIC_POINTER 0
#else
#define ABSL_HAVE_WORKING_ATOMIC_POINTER 1
#endif
namespace absl {
inline namespace lts_2019_08_08 {
namespace base_internal {
template <typename T>
class AtomicHook;
// AtomicHook is a helper class, templatized on a raw function pointer type, for
// implementing Abseil customization hooks. It is a callable object that
// dispatches to the registered hook.
//
// A default constructed object performs a no-op (and returns a default
// constructed object) if no hook has been registered.
//
// Hooks can be pre-registered via constant initialization, for example,
// ABSL_CONST_INIT static AtomicHook<void(*)()> my_hook(DefaultAction);
// and then changed at runtime via a call to Store().
//
// Reads and writes guarantee memory_order_acquire/memory_order_release
// semantics.
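//
// For example (illustrative sketch; DefaultLogFn, CustomLogFn and the
// surrounding functions are hypothetical, not part of this header):
//   ABSL_CONST_INIT static AtomicHook<void (*)(const char*)> log_hook(
//       DefaultLogFn);
//   void InstallLogger() { log_hook.Store(CustomLogFn); }
//   void Log(const char* msg) { log_hook(msg); }  // dispatches to the hook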
template <typename ReturnType, typename... Args>
class AtomicHook<ReturnType (*)(Args...)> {
public:
using FnPtr = ReturnType (*)(Args...);
  // Constructs an object that by default performs a no-op (and
  // returns a default constructed object) when no hook has been registered.
constexpr AtomicHook() : AtomicHook(DummyFunction) {}
// Constructs an object that by default dispatches to/returns the
// pre-registered default_fn when no hook has been registered at runtime.
#if ABSL_HAVE_WORKING_ATOMIC_POINTER
explicit constexpr AtomicHook(FnPtr default_fn)
: hook_(default_fn), default_fn_(default_fn) {}
#else
explicit constexpr AtomicHook(FnPtr default_fn)
: hook_(kUninitialized), default_fn_(default_fn) {}
#endif
// Stores the provided function pointer as the value for this hook.
//
// This is intended to be called once. Multiple calls are legal only if the
// same function pointer is provided for each call. The store is implemented
// as a memory_order_release operation, and read accesses are implemented as
// memory_order_acquire.
void Store(FnPtr fn) {
bool success = DoStore(fn);
static_cast<void>(success);
assert(success);
}
// Invokes the registered callback. If no callback has yet been registered, a
// default-constructed object of the appropriate type is returned instead.
template <typename... CallArgs>
ReturnType operator()(CallArgs&&... args) const {
return DoLoad()(std::forward<CallArgs>(args)...);
}
// Returns the registered callback, or nullptr if none has been registered.
// Useful if client code needs to conditionalize behavior based on whether a
// callback was registered.
//
// Note that atomic_hook.Load()() and atomic_hook() have different semantics:
// operator()() will perform a no-op if no callback was registered, while
// Load()() will dereference a null function pointer. Prefer operator()() to
// Load()() unless you must conditionalize behavior on whether a hook was
// registered.
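  //
  // For example (illustrative; my_hook is a hypothetical instance):
  //   if (FnPtr fn = my_hook.Load()) {
  //     // fn is a real registered callback, never DummyFunction.
  //   }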
FnPtr Load() const {
FnPtr ptr = DoLoad();
return (ptr == DummyFunction) ? nullptr : ptr;
}
private:
static ReturnType DummyFunction(Args...) {
return ReturnType();
}
// Current versions of MSVC (as of September 2017) have a broken
// implementation of std::atomic<T*>: Its constructor attempts to do the
// equivalent of a reinterpret_cast in a constexpr context, which is not
// allowed.
//
// This causes an issue when building with LLVM under Windows. To avoid this,
// we use a less-efficient, intptr_t-based implementation on Windows.
#if ABSL_HAVE_WORKING_ATOMIC_POINTER
// Return the stored value, or DummyFunction if no value has been stored.
FnPtr DoLoad() const { return hook_.load(std::memory_order_acquire); }
// Store the given value. Returns false if a different value was already
// stored to this object.
bool DoStore(FnPtr fn) {
assert(fn);
FnPtr expected = default_fn_;
const bool store_succeeded = hook_.compare_exchange_strong(
expected, fn, std::memory_order_acq_rel, std::memory_order_acquire);
const bool same_value_already_stored = (expected == fn);
return store_succeeded || same_value_already_stored;
}
std::atomic<FnPtr> hook_;
#else // !ABSL_HAVE_WORKING_ATOMIC_POINTER
// Use a sentinel value unlikely to be the address of an actual function.
static constexpr intptr_t kUninitialized = 0;
static_assert(sizeof(intptr_t) >= sizeof(FnPtr),
"intptr_t can't contain a function pointer");
FnPtr DoLoad() const {
const intptr_t value = hook_.load(std::memory_order_acquire);
if (value == kUninitialized) {
return default_fn_;
}
return reinterpret_cast<FnPtr>(value);
}
bool DoStore(FnPtr fn) {
assert(fn);
const auto value = reinterpret_cast<intptr_t>(fn);
intptr_t expected = kUninitialized;
const bool store_succeeded = hook_.compare_exchange_strong(
expected, value, std::memory_order_acq_rel, std::memory_order_acquire);
const bool same_value_already_stored = (expected == value);
return store_succeeded || same_value_already_stored;
}
std::atomic<intptr_t> hook_;
#endif
const FnPtr default_fn_;
};
#undef ABSL_HAVE_WORKING_ATOMIC_POINTER
} // namespace base_internal
} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_

View File

@@ -0,0 +1,70 @@
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/base/internal/atomic_hook.h"
#include "gtest/gtest.h"
#include "absl/base/attributes.h"
namespace {
int value = 0;
void TestHook(int x) { value = x; }
TEST(AtomicHookTest, NoDefaultFunction) {
ABSL_CONST_INIT static absl::base_internal::AtomicHook<void(*)(int)> hook;
value = 0;
// Test the default DummyFunction.
EXPECT_TRUE(hook.Load() == nullptr);
EXPECT_EQ(value, 0);
hook(1);
EXPECT_EQ(value, 0);
// Test a stored hook.
hook.Store(TestHook);
EXPECT_TRUE(hook.Load() == TestHook);
EXPECT_EQ(value, 0);
hook(1);
EXPECT_EQ(value, 1);
// Calling Store() with the same hook should not crash.
hook.Store(TestHook);
EXPECT_TRUE(hook.Load() == TestHook);
EXPECT_EQ(value, 1);
hook(2);
EXPECT_EQ(value, 2);
}
TEST(AtomicHookTest, WithDefaultFunction) {
// Set the default value to TestHook at compile-time.
ABSL_CONST_INIT static absl::base_internal::AtomicHook<void (*)(int)> hook(
TestHook);
value = 0;
// Test the default value is TestHook.
EXPECT_TRUE(hook.Load() == TestHook);
EXPECT_EQ(value, 0);
hook(1);
EXPECT_EQ(value, 1);
// Calling Store() with the same hook should not crash.
hook.Store(TestHook);
EXPECT_TRUE(hook.Load() == TestHook);
EXPECT_EQ(value, 1);
hook(2);
EXPECT_EQ(value, 2);
}
} // namespace

View File

@@ -0,0 +1,195 @@
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef ABSL_BASE_INTERNAL_BITS_H_
#define ABSL_BASE_INTERNAL_BITS_H_
// This file contains bitwise ops which are implementation details of various
// absl libraries.
#include <cstdint>
// Clang on Windows has __builtin_clzll; otherwise we need to use the
// Windows intrinsic functions.
#if defined(_MSC_VER)
#include <intrin.h>
#if defined(_M_X64)
#pragma intrinsic(_BitScanReverse64)
#pragma intrinsic(_BitScanForward64)
#endif
#pragma intrinsic(_BitScanReverse)
#pragma intrinsic(_BitScanForward)
#endif
#include "absl/base/attributes.h"
#if defined(_MSC_VER)
// We can achieve something similar to attribute((always_inline)) with MSVC by
// using the __forceinline keyword; however, this is not perfect. MSVC is
// much less aggressive about inlining, even with the __forceinline keyword.
#define ABSL_BASE_INTERNAL_FORCEINLINE __forceinline
#else
// Use the default inline attribute.
#define ABSL_BASE_INTERNAL_FORCEINLINE inline ABSL_ATTRIBUTE_ALWAYS_INLINE
#endif
namespace absl {
inline namespace lts_2019_08_08 {
namespace base_internal {
ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros64Slow(uint64_t n) {
int zeroes = 60;
if (n >> 32) zeroes -= 32, n >>= 32;
if (n >> 16) zeroes -= 16, n >>= 16;
if (n >> 8) zeroes -= 8, n >>= 8;
if (n >> 4) zeroes -= 4, n >>= 4;
return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[n] + zeroes;
}
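// Worked example: for n = uint64_t{1} << 16, only the second reduction fires
// (zeroes becomes 44 and n becomes 1); the table lookup then yields 3, so the
// function returns 47, matching 63 - 16.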
ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros64(uint64_t n) {
#if defined(_MSC_VER) && defined(_M_X64)
  // MSVC does not have __builtin_clzll. Use _BitScanReverse64.
unsigned long result = 0; // NOLINT(runtime/int)
if (_BitScanReverse64(&result, n)) {
return 63 - result;
}
return 64;
#elif defined(_MSC_VER)
  // MSVC does not have __builtin_clzll. Compose two calls to _BitScanReverse.
unsigned long result = 0; // NOLINT(runtime/int)
if ((n >> 32) && _BitScanReverse(&result, n >> 32)) {
return 31 - result;
}
if (_BitScanReverse(&result, n)) {
return 63 - result;
}
return 64;
#elif defined(__GNUC__)
// Use __builtin_clzll, which uses the following instructions:
// x86: bsr
// ARM64: clz
// PPC: cntlzd
static_assert(sizeof(unsigned long long) == sizeof(n), // NOLINT(runtime/int)
"__builtin_clzll does not take 64-bit arg");
// Handle 0 as a special case because __builtin_clzll(0) is undefined.
if (n == 0) {
return 64;
}
return __builtin_clzll(n);
#else
return CountLeadingZeros64Slow(n);
#endif
}
ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros32Slow(uint64_t n) {
int zeroes = 28;
if (n >> 16) zeroes -= 16, n >>= 16;
if (n >> 8) zeroes -= 8, n >>= 8;
if (n >> 4) zeroes -= 4, n >>= 4;
return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[n] + zeroes;
}
ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros32(uint32_t n) {
#if defined(_MSC_VER)
unsigned long result = 0; // NOLINT(runtime/int)
if (_BitScanReverse(&result, n)) {
return 31 - result;
}
return 32;
#elif defined(__GNUC__)
// Use __builtin_clz, which uses the following instructions:
// x86: bsr
// ARM64: clz
// PPC: cntlzd
static_assert(sizeof(int) == sizeof(n),
"__builtin_clz does not take 32-bit arg");
// Handle 0 as a special case because __builtin_clz(0) is undefined.
if (n == 0) {
return 32;
}
return __builtin_clz(n);
#else
return CountLeadingZeros32Slow(n);
#endif
}
ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero64Slow(uint64_t n) {
int c = 63;
n &= ~n + 1;
if (n & 0x00000000FFFFFFFF) c -= 32;
if (n & 0x0000FFFF0000FFFF) c -= 16;
if (n & 0x00FF00FF00FF00FF) c -= 8;
if (n & 0x0F0F0F0F0F0F0F0F) c -= 4;
if (n & 0x3333333333333333) c -= 2;
if (n & 0x5555555555555555) c -= 1;
return c;
}
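// Worked example: for n = 0x28 (binary 101000), n &= ~n + 1 isolates the
// lowest set bit, leaving 0x08; the first four masks all contain bit 3, so c
// drops from 63 to 3, the trailing-zero count of 0x28.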
ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero64(uint64_t n) {
#if defined(_MSC_VER) && defined(_M_X64)
unsigned long result = 0; // NOLINT(runtime/int)
_BitScanForward64(&result, n);
return result;
#elif defined(_MSC_VER)
unsigned long result = 0; // NOLINT(runtime/int)
if (static_cast<uint32_t>(n) == 0) {
_BitScanForward(&result, n >> 32);
return result + 32;
}
_BitScanForward(&result, n);
return result;
#elif defined(__GNUC__)
static_assert(sizeof(unsigned long long) == sizeof(n), // NOLINT(runtime/int)
"__builtin_ctzll does not take 64-bit arg");
return __builtin_ctzll(n);
#else
return CountTrailingZerosNonZero64Slow(n);
#endif
}
ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero32Slow(uint32_t n) {
int c = 31;
n &= ~n + 1;
if (n & 0x0000FFFF) c -= 16;
if (n & 0x00FF00FF) c -= 8;
if (n & 0x0F0F0F0F) c -= 4;
if (n & 0x33333333) c -= 2;
if (n & 0x55555555) c -= 1;
return c;
}
ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero32(uint32_t n) {
#if defined(_MSC_VER)
unsigned long result = 0; // NOLINT(runtime/int)
_BitScanForward(&result, n);
return result;
#elif defined(__GNUC__)
static_assert(sizeof(int) == sizeof(n),
"__builtin_ctz does not take 32-bit arg");
return __builtin_ctz(n);
#else
return CountTrailingZerosNonZero32Slow(n);
#endif
}
#undef ABSL_BASE_INTERNAL_FORCEINLINE
} // namespace base_internal
} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_BASE_INTERNAL_BITS_H_

View File

@@ -0,0 +1,97 @@
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/base/internal/bits.h"
#include "gtest/gtest.h"
namespace {
int CLZ64(uint64_t n) {
int fast = absl::base_internal::CountLeadingZeros64(n);
int slow = absl::base_internal::CountLeadingZeros64Slow(n);
EXPECT_EQ(fast, slow) << n;
return fast;
}
TEST(BitsTest, CountLeadingZeros64) {
EXPECT_EQ(64, CLZ64(uint64_t{}));
EXPECT_EQ(0, CLZ64(~uint64_t{}));
for (int index = 0; index < 64; index++) {
uint64_t x = static_cast<uint64_t>(1) << index;
const auto cnt = 63 - index;
ASSERT_EQ(cnt, CLZ64(x)) << index;
ASSERT_EQ(cnt, CLZ64(x + x - 1)) << index;
}
}
int CLZ32(uint32_t n) {
int fast = absl::base_internal::CountLeadingZeros32(n);
int slow = absl::base_internal::CountLeadingZeros32Slow(n);
EXPECT_EQ(fast, slow) << n;
return fast;
}
TEST(BitsTest, CountLeadingZeros32) {
EXPECT_EQ(32, CLZ32(uint32_t{}));
EXPECT_EQ(0, CLZ32(~uint32_t{}));
for (int index = 0; index < 32; index++) {
uint32_t x = static_cast<uint32_t>(1) << index;
const auto cnt = 31 - index;
ASSERT_EQ(cnt, CLZ32(x)) << index;
ASSERT_EQ(cnt, CLZ32(x + x - 1)) << index;
ASSERT_EQ(CLZ64(x), CLZ32(x) + 32);
}
}
int CTZ64(uint64_t n) {
int fast = absl::base_internal::CountTrailingZerosNonZero64(n);
int slow = absl::base_internal::CountTrailingZerosNonZero64Slow(n);
EXPECT_EQ(fast, slow) << n;
return fast;
}
TEST(BitsTest, CountTrailingZerosNonZero64) {
EXPECT_EQ(0, CTZ64(~uint64_t{}));
for (int index = 0; index < 64; index++) {
uint64_t x = static_cast<uint64_t>(1) << index;
const auto cnt = index;
ASSERT_EQ(cnt, CTZ64(x)) << index;
ASSERT_EQ(cnt, CTZ64(~(x - 1))) << index;
}
}
int CTZ32(uint32_t n) {
int fast = absl::base_internal::CountTrailingZerosNonZero32(n);
int slow = absl::base_internal::CountTrailingZerosNonZero32Slow(n);
EXPECT_EQ(fast, slow) << n;
return fast;
}
TEST(BitsTest, CountTrailingZerosNonZero32) {
EXPECT_EQ(0, CTZ32(~uint32_t{}));
for (int index = 0; index < 32; index++) {
uint32_t x = static_cast<uint32_t>(1) << index;
const auto cnt = index;
ASSERT_EQ(cnt, CTZ32(x)) << index;
ASSERT_EQ(cnt, CTZ32(~(x - 1))) << index;
}
}
} // namespace

View File

@@ -0,0 +1,22 @@
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <iostream>
#include "absl/base/internal/thread_identity.h"
int main() {
auto* tid = absl::base_internal::CurrentThreadIdentityIfPresent();
// Make sure the above call can't be optimized out
std::cout << static_cast<void*>(tid) << std::endl;
}

View File

@@ -0,0 +1,107 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// The implementation of CycleClock::Frequency.
//
// NOTE: only i386 and x86_64 have been well tested.
// PPC, sparc, alpha, and ia64 are based on
// http://peter.kuscsik.com/wordpress/?p=14
// with modifications by m3b. See also
// https://setisvn.ssl.berkeley.edu/svn/lib/fftw-3.0.1/kernel/cycle.h
#include "absl/base/internal/cycleclock.h"
#include <atomic>
#include <chrono> // NOLINT(build/c++11)
#include "absl/base/internal/unscaledcycleclock.h"
namespace absl {
inline namespace lts_2019_08_08 {
namespace base_internal {
#if ABSL_USE_UNSCALED_CYCLECLOCK
namespace {
#ifdef NDEBUG
#ifdef ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
// Not debug mode and the UnscaledCycleClock frequency is the CPU
// frequency. Scale the CycleClock to prevent overflow if someone
// tries to represent the time as cycles since the Unix epoch.
static constexpr int32_t kShift = 1;
#else
// Not debug mode and the UnscaledCycleClock isn't operating at the
// raw CPU frequency. There is no need to do any scaling, so don't
// needlessly sacrifice precision.
static constexpr int32_t kShift = 0;
#endif
#else
// In debug mode use a different shift to discourage depending on a
// particular shift value.
static constexpr int32_t kShift = 2;
#endif
static constexpr double kFrequencyScale = 1.0 / (1 << kShift);
static std::atomic<CycleClockSourceFunc> cycle_clock_source;
CycleClockSourceFunc LoadCycleClockSource() {
// Optimize for the common case (no callback) by first doing a relaxed load;
// this is significantly faster on non-x86 platforms.
if (cycle_clock_source.load(std::memory_order_relaxed) == nullptr) {
return nullptr;
}
// This corresponds to the store(std::memory_order_release) in
// CycleClockSource::Register, and makes sure that any updates made prior to
// registering the callback are visible to this thread before the callback is
// invoked.
return cycle_clock_source.load(std::memory_order_acquire);
}
} // namespace
int64_t CycleClock::Now() {
auto fn = LoadCycleClockSource();
if (fn == nullptr) {
return base_internal::UnscaledCycleClock::Now() >> kShift;
}
return fn() >> kShift;
}
double CycleClock::Frequency() {
return kFrequencyScale * base_internal::UnscaledCycleClock::Frequency();
}
void CycleClockSource::Register(CycleClockSourceFunc source) {
// Corresponds to the load(std::memory_order_acquire) in LoadCycleClockSource.
cycle_clock_source.store(source, std::memory_order_release);
}
#else
int64_t CycleClock::Now() {
return std::chrono::duration_cast<std::chrono::nanoseconds>(
std::chrono::steady_clock::now().time_since_epoch())
.count();
}
double CycleClock::Frequency() {
return 1e9;
}
#endif
} // namespace base_internal
} // inline namespace lts_2019_08_08
} // namespace absl

View File

@@ -0,0 +1,92 @@
//
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// -----------------------------------------------------------------------------
// File: cycleclock.h
// -----------------------------------------------------------------------------
//
// This header file defines a `CycleClock`, which yields the value and frequency
// of a cycle counter that increments at a rate that is approximately constant.
//
// NOTE:
//
// The cycle counter frequency is not necessarily related to the core clock
// frequency and should not be treated as such. That is, `CycleClock` cycles are
// not necessarily "CPU cycles" and code should not rely on that behavior, even
// if experimentally observed.
//
// An arbitrary offset may have been added to the counter at power on.
//
// On some platforms, the rate and offset of the counter may differ
// slightly when read from different CPUs of a multiprocessor. Usually,
// we try to ensure that the operating system adjusts values periodically
// so that values agree approximately. If you need stronger guarantees,
// consider using alternate interfaces.
//
// The CPU is not required to maintain the ordering of a cycle counter read
// with respect to surrounding instructions.
#ifndef ABSL_BASE_INTERNAL_CYCLECLOCK_H_
#define ABSL_BASE_INTERNAL_CYCLECLOCK_H_
#include <cstdint>
namespace absl {
inline namespace lts_2019_08_08 {
namespace base_internal {
// -----------------------------------------------------------------------------
// CycleClock
// -----------------------------------------------------------------------------
class CycleClock {
public:
// CycleClock::Now()
//
// Returns the value of a cycle counter that counts at a rate that is
// approximately constant.
static int64_t Now();
// CycleClock::Frequency()
//
// Returns the amount by which `CycleClock::Now()` increases per second. Note
// that this value may not necessarily match the core CPU clock frequency.
static double Frequency();
private:
CycleClock() = delete; // no instances
CycleClock(const CycleClock&) = delete;
CycleClock& operator=(const CycleClock&) = delete;
};
using CycleClockSourceFunc = int64_t (*)();
class CycleClockSource {
private:
// CycleClockSource::Register()
//
// Register a function that provides an alternate source for the unscaled CPU
// cycle count value. The source function must be async signal safe, must not
// call CycleClock::Now(), and must have a frequency that matches that of the
// unscaled clock used by CycleClock. A nullptr value resets CycleClock to use
// the default source.
static void Register(CycleClockSourceFunc source);
};
} // namespace base_internal
} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_BASE_INTERNAL_CYCLECLOCK_H_
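// Usage sketch (illustrative; not part of the header above): converting a
// cycle delta to seconds with Frequency(). Only the ratio of Now() deltas
// to Frequency() is meaningful; the raw counter is not "CPU cycles".
// The helper name is hypothetical.
#include "absl/base/internal/cycleclock.h"
inline double CycleDeltaToSeconds(int64_t start_cycles, int64_t end_cycles) {
  return static_cast<double>(end_cycles - start_cycles) /
         absl::base_internal::CycleClock::Frequency();
}
// Typical use:
//   int64_t t0 = absl::base_internal::CycleClock::Now();
//   ... timed work ...
//   double s = CycleDeltaToSeconds(t0, absl::base_internal::CycleClock::Now());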

View File

@@ -0,0 +1,161 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Functions for directly invoking mmap() via syscall, avoiding the case where
// mmap() has been locally overridden.
#ifndef ABSL_BASE_INTERNAL_DIRECT_MMAP_H_
#define ABSL_BASE_INTERNAL_DIRECT_MMAP_H_
#include "absl/base/config.h"
#if ABSL_HAVE_MMAP
#include <sys/mman.h>
#ifdef __linux__
#include <sys/types.h>
#ifdef __BIONIC__
#include <sys/syscall.h>
#else
#include <syscall.h>
#endif
#include <linux/unistd.h>
#include <unistd.h>
#include <cerrno>
#include <cstdarg>
#include <cstdint>
#ifdef __mips__
// Include definitions of the ABI currently in use.
#ifdef __BIONIC__
// Android doesn't have sgidefs.h, but does have asm/sgidefs.h, which has the
// definitions we need.
#include <asm/sgidefs.h>
#else
#include <sgidefs.h>
#endif // __BIONIC__
#endif // __mips__
// SYS_mmap and SYS_munmap are not defined in Android.
#ifdef __BIONIC__
extern "C" void* __mmap2(void*, size_t, int, int, int, size_t);
#if defined(__NR_mmap) && !defined(SYS_mmap)
#define SYS_mmap __NR_mmap
#endif
#ifndef SYS_munmap
#define SYS_munmap __NR_munmap
#endif
#endif // __BIONIC__
namespace absl {
inline namespace lts_2019_08_08 {
namespace base_internal {
// Platform specific logic extracted from
// https://chromium.googlesource.com/linux-syscall-support/+/master/linux_syscall_support.h
inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd,
off64_t offset) noexcept {
#if defined(__i386__) || defined(__ARM_ARCH_3__) || defined(__ARM_EABI__) || \
(defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32) || \
(defined(__PPC__) && !defined(__PPC64__)) || \
(defined(__s390__) && !defined(__s390x__))
// On these architectures, implement mmap with mmap2.
static int pagesize = 0;
if (pagesize == 0) {
#if defined(__wasm__) || defined(__asmjs__)
pagesize = getpagesize();
#else
pagesize = sysconf(_SC_PAGESIZE);
#endif
}
if (offset < 0 || offset % pagesize != 0) {
errno = EINVAL;
return MAP_FAILED;
}
#ifdef __BIONIC__
// SYS_mmap2 has problems on Android API level <= 16.
// Workaround by invoking __mmap2() instead.
return __mmap2(start, length, prot, flags, fd, offset / pagesize);
#else
return reinterpret_cast<void*>(
syscall(SYS_mmap2, start, length, prot, flags, fd,
static_cast<off_t>(offset / pagesize)));
#endif
#elif defined(__s390x__)
// On s390x, mmap() arguments are passed in memory.
unsigned long buf[6] = {reinterpret_cast<unsigned long>(start), // NOLINT
static_cast<unsigned long>(length), // NOLINT
static_cast<unsigned long>(prot), // NOLINT
static_cast<unsigned long>(flags), // NOLINT
static_cast<unsigned long>(fd), // NOLINT
static_cast<unsigned long>(offset)}; // NOLINT
return reinterpret_cast<void*>(syscall(SYS_mmap, buf));
#elif defined(__x86_64__)
// The x32 ABI has 32 bit longs, but the syscall interface is 64 bit.
// We need to explicitly cast to an unsigned 64 bit type to avoid implicit
// sign extension. We can't cast pointers directly because those are
// 32 bits, and gcc will dump ugly warnings about casting from a pointer
// to an integer of a different size. We also need to make sure __off64_t
// isn't truncated to 32-bits under x32.
#define MMAP_SYSCALL_ARG(x) ((uint64_t)(uintptr_t)(x))
return reinterpret_cast<void*>(
syscall(SYS_mmap, MMAP_SYSCALL_ARG(start), MMAP_SYSCALL_ARG(length),
MMAP_SYSCALL_ARG(prot), MMAP_SYSCALL_ARG(flags),
MMAP_SYSCALL_ARG(fd), static_cast<uint64_t>(offset)));
#undef MMAP_SYSCALL_ARG
#else  // Remaining 64-bit architectures.
static_assert(sizeof(unsigned long) == 8, "Platform is not 64-bit");
return reinterpret_cast<void*>(
syscall(SYS_mmap, start, length, prot, flags, fd, offset));
#endif
}
inline int DirectMunmap(void* start, size_t length) {
return static_cast<int>(syscall(SYS_munmap, start, length));
}
} // namespace base_internal
} // inline namespace lts_2019_08_08
} // namespace absl
#else // !__linux__
// For non-linux platforms where we have mmap, just dispatch directly to the
// actual mmap()/munmap() methods.
namespace absl {
inline namespace lts_2019_08_08 {
namespace base_internal {
inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd,
off_t offset) {
return mmap(start, length, prot, flags, fd, offset);
}
inline int DirectMunmap(void* start, size_t length) {
return munmap(start, length);
}
} // namespace base_internal
} // inline namespace lts_2019_08_08
} // namespace absl
#endif // __linux__
#endif // ABSL_HAVE_MMAP
#endif // ABSL_BASE_INTERNAL_DIRECT_MMAP_H_
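// Usage sketch (illustrative; assumes a Linux build with ABSL_HAVE_MMAP):
// mapping and unmapping one anonymous page through the direct syscall
// wrappers, bypassing any local mmap() interposition. The helper name is
// hypothetical.
#include <cstddef>
#include <sys/mman.h>
#include "absl/base/internal/direct_mmap.h"
inline void* MapOneAnonymousPage(size_t page_size) {
  void* p = absl::base_internal::DirectMmap(
      nullptr, page_size, PROT_READ | PROT_WRITE,
      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  return p == MAP_FAILED ? nullptr : p;
}
// Release with absl::base_internal::DirectMunmap(p, page_size).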

View File

@@ -0,0 +1,274 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef ABSL_BASE_INTERNAL_ENDIAN_H_
#define ABSL_BASE_INTERNAL_ENDIAN_H_
// The following guarantees declaration of the byte swap functions
#ifdef _MSC_VER
#include <stdlib.h> // NOLINT(build/include)
#elif defined(__APPLE__)
// macOS / Darwin features
#include <libkern/OSByteOrder.h>
#elif defined(__FreeBSD__)
#include <sys/endian.h>
#elif defined(__GLIBC__)
#include <byteswap.h> // IWYU pragma: export
#endif
#include <cstdint>
#include "absl/base/config.h"
#include "absl/base/internal/unaligned_access.h"
#include "absl/base/port.h"
namespace absl {
inline namespace lts_2019_08_08 {
// Use compiler byte-swapping intrinsics if they are available. 32-bit
// and 64-bit versions are available in Clang and GCC as of GCC 4.3.0.
// The 16-bit version is available in Clang and GCC only as of GCC 4.8.0.
// For simplicity, we enable them all only for GCC 4.8.0 or later.
#if defined(__clang__) || \
(defined(__GNUC__) && \
((__GNUC__ == 4 && __GNUC_MINOR__ >= 8) || __GNUC__ >= 5))
inline uint64_t gbswap_64(uint64_t host_int) {
return __builtin_bswap64(host_int);
}
inline uint32_t gbswap_32(uint32_t host_int) {
return __builtin_bswap32(host_int);
}
inline uint16_t gbswap_16(uint16_t host_int) {
return __builtin_bswap16(host_int);
}
#elif defined(_MSC_VER)
inline uint64_t gbswap_64(uint64_t host_int) {
return _byteswap_uint64(host_int);
}
inline uint32_t gbswap_32(uint32_t host_int) {
return _byteswap_ulong(host_int);
}
inline uint16_t gbswap_16(uint16_t host_int) {
return _byteswap_ushort(host_int);
}
#elif defined(__APPLE__)
inline uint64_t gbswap_64(uint64_t host_int) { return OSSwapInt64(host_int); }
inline uint32_t gbswap_32(uint32_t host_int) { return OSSwapInt32(host_int); }
inline uint16_t gbswap_16(uint16_t host_int) { return OSSwapInt16(host_int); }
#else
inline uint64_t gbswap_64(uint64_t host_int) {
#if defined(__GNUC__) && defined(__x86_64__) && !defined(__APPLE__)
// Adapted from /usr/include/byteswap.h. Not available on Mac.
if (__builtin_constant_p(host_int)) {
return __bswap_constant_64(host_int);
} else {
uint64_t result;
__asm__("bswap %0" : "=r"(result) : "0"(host_int));
return result;
}
#elif defined(__GLIBC__)
return bswap_64(host_int);
#else
return (((host_int & uint64_t{0xFF}) << 56) |
((host_int & uint64_t{0xFF00}) << 40) |
((host_int & uint64_t{0xFF0000}) << 24) |
((host_int & uint64_t{0xFF000000}) << 8) |
((host_int & uint64_t{0xFF00000000}) >> 8) |
((host_int & uint64_t{0xFF0000000000}) >> 24) |
((host_int & uint64_t{0xFF000000000000}) >> 40) |
((host_int & uint64_t{0xFF00000000000000}) >> 56));
#endif // bswap_64
}
inline uint32_t gbswap_32(uint32_t host_int) {
#if defined(__GLIBC__)
return bswap_32(host_int);
#else
return (((host_int & uint32_t{0xFF}) << 24) |
((host_int & uint32_t{0xFF00}) << 8) |
((host_int & uint32_t{0xFF0000}) >> 8) |
((host_int & uint32_t{0xFF000000}) >> 24));
#endif
}
inline uint16_t gbswap_16(uint16_t host_int) {
#if defined(__GLIBC__)
return bswap_16(host_int);
#else
return (((host_int & uint16_t{0xFF}) << 8) |
((host_int & uint16_t{0xFF00}) >> 8));
#endif
}
#endif  // intrinsics available
#ifdef ABSL_IS_LITTLE_ENDIAN
// Definitions for ntohl etc. that don't require us to include
// netinet/in.h. We wrap gbswap_32 and gbswap_16 in functions rather
// than just #defining them because in debug mode, gcc doesn't
// correctly handle the (rather involved) definitions of bswap_32.
// gcc guarantees that inline functions are as fast as macros, so
// this isn't a performance hit.
inline uint16_t ghtons(uint16_t x) { return gbswap_16(x); }
inline uint32_t ghtonl(uint32_t x) { return gbswap_32(x); }
inline uint64_t ghtonll(uint64_t x) { return gbswap_64(x); }
#elif defined ABSL_IS_BIG_ENDIAN
// These definitions are simpler on big-endian machines
// These are functions instead of macros to avoid self-assignment warnings
// on calls such as "i = ghtonl(i);". This also provides type checking.
inline uint16_t ghtons(uint16_t x) { return x; }
inline uint32_t ghtonl(uint32_t x) { return x; }
inline uint64_t ghtonll(uint64_t x) { return x; }
#else
#error \
"Unsupported byte order: Either ABSL_IS_BIG_ENDIAN or " \
"ABSL_IS_LITTLE_ENDIAN must be defined"
#endif // byte order
inline uint16_t gntohs(uint16_t x) { return ghtons(x); }
inline uint32_t gntohl(uint32_t x) { return ghtonl(x); }
inline uint64_t gntohll(uint64_t x) { return ghtonll(x); }
// Utilities to convert numbers between the current host's native byte
// order and little-endian byte order
//
// Load/Store methods are alignment safe
namespace little_endian {
// Conversion functions.
#ifdef ABSL_IS_LITTLE_ENDIAN
inline uint16_t FromHost16(uint16_t x) { return x; }
inline uint16_t ToHost16(uint16_t x) { return x; }
inline uint32_t FromHost32(uint32_t x) { return x; }
inline uint32_t ToHost32(uint32_t x) { return x; }
inline uint64_t FromHost64(uint64_t x) { return x; }
inline uint64_t ToHost64(uint64_t x) { return x; }
inline constexpr bool IsLittleEndian() { return true; }
#elif defined ABSL_IS_BIG_ENDIAN
inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); }
inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); }
inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); }
inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); }
inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); }
inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); }
inline constexpr bool IsLittleEndian() { return false; }
#endif /* ENDIAN */
// Functions to do unaligned loads and stores in little-endian order.
inline uint16_t Load16(const void *p) {
return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
}
inline void Store16(void *p, uint16_t v) {
ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v));
}
inline uint32_t Load32(const void *p) {
return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p));
}
inline void Store32(void *p, uint32_t v) {
ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v));
}
inline uint64_t Load64(const void *p) {
return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p));
}
inline void Store64(void *p, uint64_t v) {
ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v));
}
} // namespace little_endian
// Utilities to convert numbers between the current host's native byte
// order and big-endian byte order (same as network byte order)
//
// Load/Store methods are alignment safe
namespace big_endian {
#ifdef ABSL_IS_LITTLE_ENDIAN
inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); }
inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); }
inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); }
inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); }
inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); }
inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); }
inline constexpr bool IsLittleEndian() { return true; }
#elif defined ABSL_IS_BIG_ENDIAN
inline uint16_t FromHost16(uint16_t x) { return x; }
inline uint16_t ToHost16(uint16_t x) { return x; }
inline uint32_t FromHost32(uint32_t x) { return x; }
inline uint32_t ToHost32(uint32_t x) { return x; }
inline uint64_t FromHost64(uint64_t x) { return x; }
inline uint64_t ToHost64(uint64_t x) { return x; }
inline constexpr bool IsLittleEndian() { return false; }
#endif /* ENDIAN */
// Functions to do unaligned loads and stores in big-endian order.
inline uint16_t Load16(const void *p) {
return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
}
inline void Store16(void *p, uint16_t v) {
ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v));
}
inline uint32_t Load32(const void *p) {
return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p));
}
inline void Store32(void *p, uint32_t v) {
ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v));
}
inline uint64_t Load64(const void *p) {
return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p));
}
inline void Store64(void *p, uint64_t v) {
ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v));
}
} // namespace big_endian
} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_BASE_INTERNAL_ENDIAN_H_
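// Usage sketch (illustrative; not part of the header above): reading a
// 32-bit little-endian length prefix from a wire format. The load is
// alignment-safe and behaves identically on little- and big-endian hosts.
// The helper name is hypothetical.
#include <cstdint>
#include "absl/base/internal/endian.h"
inline uint32_t ReadWireLength(const char* wire_bytes) {
  return absl::little_endian::Load32(wire_bytes);
}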

View File

@@ -0,0 +1,265 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/base/internal/endian.h"
#include <algorithm>
#include <cstdint>
#include <limits>
#include <random>
#include <vector>
#include "gtest/gtest.h"
#include "absl/base/config.h"
namespace absl {
inline namespace lts_2019_08_08 {
namespace {
const uint64_t kInitialNumber{0x0123456789abcdef};
const uint64_t k64Value{kInitialNumber};
const uint32_t k32Value{0x01234567};
const uint16_t k16Value{0x0123};
const int kNumValuesToTest = 1000000;
const int kRandomSeed = 12345;
#if defined(ABSL_IS_BIG_ENDIAN)
const uint64_t kInitialInNetworkOrder{kInitialNumber};
const uint64_t k64ValueLE{0xefcdab8967452301};
const uint32_t k32ValueLE{0x67452301};
const uint16_t k16ValueLE{0x2301};
const uint64_t k64ValueBE{kInitialNumber};
const uint32_t k32ValueBE{k32Value};
const uint16_t k16ValueBE{k16Value};
#elif defined(ABSL_IS_LITTLE_ENDIAN)
const uint64_t kInitialInNetworkOrder{0xefcdab8967452301};
const uint64_t k64ValueLE{kInitialNumber};
const uint32_t k32ValueLE{k32Value};
const uint16_t k16ValueLE{k16Value};
const uint64_t k64ValueBE{0xefcdab8967452301};
const uint32_t k32ValueBE{0x67452301};
const uint16_t k16ValueBE{0x2301};
#endif
template<typename T>
std::vector<T> GenerateAllValuesForType() {
std::vector<T> result;
T next = std::numeric_limits<T>::min();
while (true) {
result.push_back(next);
if (next == std::numeric_limits<T>::max()) {
return result;
}
++next;
}
}
template<typename T>
std::vector<T> GenerateRandomIntegers(size_t numValuesToTest) {
std::vector<T> result;
std::mt19937_64 rng(kRandomSeed);
for (size_t i = 0; i < numValuesToTest; ++i) {
result.push_back(rng());
}
return result;
}
void ManualByteSwap(char* bytes, int length) {
if (length == 1)
return;
EXPECT_EQ(0, length % 2);
for (int i = 0; i < length / 2; ++i) {
int j = (length - 1) - i;
using std::swap;
swap(bytes[i], bytes[j]);
}
}
template<typename T>
inline T UnalignedLoad(const char* p) {
static_assert(
sizeof(T) == 1 || sizeof(T) == 2 || sizeof(T) == 4 || sizeof(T) == 8,
"Unexpected type size");
switch (sizeof(T)) {
case 1: return *reinterpret_cast<const T*>(p);
case 2:
return ABSL_INTERNAL_UNALIGNED_LOAD16(p);
case 4:
return ABSL_INTERNAL_UNALIGNED_LOAD32(p);
case 8:
return ABSL_INTERNAL_UNALIGNED_LOAD64(p);
default:
// Suppresses invalid "not all control paths return a value" on MSVC
return {};
}
}
template <typename T, typename ByteSwapper>
static void GBSwapHelper(const std::vector<T>& host_values_to_test,
const ByteSwapper& byte_swapper) {
// Test byte_swapper against a manual byte swap.
for (typename std::vector<T>::const_iterator it = host_values_to_test.begin();
it != host_values_to_test.end(); ++it) {
T host_value = *it;
char actual_value[sizeof(host_value)];
memcpy(actual_value, &host_value, sizeof(host_value));
byte_swapper(actual_value);
char expected_value[sizeof(host_value)];
memcpy(expected_value, &host_value, sizeof(host_value));
ManualByteSwap(expected_value, sizeof(host_value));
ASSERT_EQ(0, memcmp(actual_value, expected_value, sizeof(host_value)))
<< "Swap output for 0x" << std::hex << host_value << " does not match. "
<< "Expected: 0x" << UnalignedLoad<T>(expected_value) << "; "
<< "actual: 0x" << UnalignedLoad<T>(actual_value);
}
}
void Swap16(char* bytes) {
ABSL_INTERNAL_UNALIGNED_STORE16(
bytes, gbswap_16(ABSL_INTERNAL_UNALIGNED_LOAD16(bytes)));
}
void Swap32(char* bytes) {
ABSL_INTERNAL_UNALIGNED_STORE32(
bytes, gbswap_32(ABSL_INTERNAL_UNALIGNED_LOAD32(bytes)));
}
void Swap64(char* bytes) {
ABSL_INTERNAL_UNALIGNED_STORE64(
bytes, gbswap_64(ABSL_INTERNAL_UNALIGNED_LOAD64(bytes)));
}
TEST(EndianessTest, Uint16) {
GBSwapHelper(GenerateAllValuesForType<uint16_t>(), &Swap16);
}
TEST(EndianessTest, Uint32) {
GBSwapHelper(GenerateRandomIntegers<uint32_t>(kNumValuesToTest), &Swap32);
}
TEST(EndianessTest, Uint64) {
GBSwapHelper(GenerateRandomIntegers<uint64_t>(kNumValuesToTest), &Swap64);
}
TEST(EndianessTest, ghtonll_gntohll) {
// Test that absl::ghtonl compiles correctly
uint32_t test = 0x01234567;
EXPECT_EQ(absl::gntohl(absl::ghtonl(test)), test);
uint64_t comp = absl::ghtonll(kInitialNumber);
EXPECT_EQ(comp, kInitialInNetworkOrder);
comp = absl::gntohll(kInitialInNetworkOrder);
EXPECT_EQ(comp, kInitialNumber);
// Test that htonll and ntohll are each others' inverse functions on a
// somewhat assorted batch of numbers. 37 is chosen not to be anything
// particularly nice in base 2.
uint64_t value = 1;
for (int i = 0; i < 100; ++i) {
comp = absl::ghtonll(absl::gntohll(value));
EXPECT_EQ(value, comp);
comp = absl::gntohll(absl::ghtonll(value));
EXPECT_EQ(value, comp);
value *= 37;
}
}
TEST(EndianessTest, little_endian) {
// Check little_endian uint16_t.
uint64_t comp = little_endian::FromHost16(k16Value);
EXPECT_EQ(comp, k16ValueLE);
comp = little_endian::ToHost16(k16ValueLE);
EXPECT_EQ(comp, k16Value);
// Check little_endian uint32_t.
comp = little_endian::FromHost32(k32Value);
EXPECT_EQ(comp, k32ValueLE);
comp = little_endian::ToHost32(k32ValueLE);
EXPECT_EQ(comp, k32Value);
// Check little_endian uint64_t.
comp = little_endian::FromHost64(k64Value);
EXPECT_EQ(comp, k64ValueLE);
comp = little_endian::ToHost64(k64ValueLE);
EXPECT_EQ(comp, k64Value);
// Check little-endian Load and store functions.
uint16_t u16Buf;
uint32_t u32Buf;
uint64_t u64Buf;
little_endian::Store16(&u16Buf, k16Value);
EXPECT_EQ(u16Buf, k16ValueLE);
comp = little_endian::Load16(&u16Buf);
EXPECT_EQ(comp, k16Value);
little_endian::Store32(&u32Buf, k32Value);
EXPECT_EQ(u32Buf, k32ValueLE);
comp = little_endian::Load32(&u32Buf);
EXPECT_EQ(comp, k32Value);
little_endian::Store64(&u64Buf, k64Value);
EXPECT_EQ(u64Buf, k64ValueLE);
comp = little_endian::Load64(&u64Buf);
EXPECT_EQ(comp, k64Value);
}
TEST(EndianessTest, big_endian) {
// Check big-endian Load and store functions.
uint16_t u16Buf;
uint32_t u32Buf;
uint64_t u64Buf;
unsigned char buffer[10];
big_endian::Store16(&u16Buf, k16Value);
EXPECT_EQ(u16Buf, k16ValueBE);
uint64_t comp = big_endian::Load16(&u16Buf);
EXPECT_EQ(comp, k16Value);
big_endian::Store32(&u32Buf, k32Value);
EXPECT_EQ(u32Buf, k32ValueBE);
comp = big_endian::Load32(&u32Buf);
EXPECT_EQ(comp, k32Value);
big_endian::Store64(&u64Buf, k64Value);
EXPECT_EQ(u64Buf, k64ValueBE);
comp = big_endian::Load64(&u64Buf);
EXPECT_EQ(comp, k64Value);
big_endian::Store16(buffer + 1, k16Value);
EXPECT_EQ(u16Buf, k16ValueBE);
comp = big_endian::Load16(buffer + 1);
EXPECT_EQ(comp, k16Value);
big_endian::Store32(buffer + 1, k32Value);
EXPECT_EQ(u32Buf, k32ValueBE);
comp = big_endian::Load32(buffer + 1);
EXPECT_EQ(comp, k32Value);
big_endian::Store64(buffer + 1, k64Value);
EXPECT_EQ(u64Buf, k64ValueBE);
comp = big_endian::Load64(buffer + 1);
EXPECT_EQ(comp, k64Value);
}
} // namespace
} // inline namespace lts_2019_08_08
} // namespace absl

View File

@@ -0,0 +1,75 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/base/internal/exception_safety_testing.h"
#include "gtest/gtest.h"
#include "absl/meta/type_traits.h"
namespace testing {
exceptions_internal::NoThrowTag nothrow_ctor;
exceptions_internal::StrongGuaranteeTagType strong_guarantee;
exceptions_internal::ExceptionSafetyTestBuilder<> MakeExceptionSafetyTester() {
return {};
}
namespace exceptions_internal {
int countdown = -1;
ConstructorTracker* ConstructorTracker::current_tracker_instance_ = nullptr;
void MaybeThrow(absl::string_view msg, bool throw_bad_alloc) {
if (countdown-- == 0) {
if (throw_bad_alloc) throw TestBadAllocException(msg);
throw TestException(msg);
}
}
testing::AssertionResult FailureMessage(const TestException& e,
int countdown) noexcept {
return testing::AssertionFailure() << "Exception thrown from " << e.what();
}
std::string GetSpecString(TypeSpec spec) {
std::string out;
absl::string_view sep;
const auto append = [&](absl::string_view s) {
absl::StrAppend(&out, sep, s);
sep = " | ";
};
if (static_cast<bool>(TypeSpec::kNoThrowCopy & spec)) {
append("kNoThrowCopy");
}
if (static_cast<bool>(TypeSpec::kNoThrowMove & spec)) {
append("kNoThrowMove");
}
if (static_cast<bool>(TypeSpec::kNoThrowNew & spec)) {
append("kNoThrowNew");
}
return out;
}
std::string GetSpecString(AllocSpec spec) {
return static_cast<bool>(AllocSpec::kNoThrowAllocate & spec)
? "kNoThrowAllocate"
: "";
}
} // namespace exceptions_internal
} // namespace testing

File diff suppressed because it is too large

View File

@@ -0,0 +1,42 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Testing utilities for ABSL types which throw exceptions.
#ifndef ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_
#define ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_
#include "gtest/gtest.h"
#include "absl/base/config.h"
// ABSL_BASE_INTERNAL_EXPECT_FAIL tests either for a specified thrown exception
// if exceptions are enabled, or for death with a specified text in the error
// message
#ifdef ABSL_HAVE_EXCEPTIONS
#define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \
EXPECT_THROW(expr, exception_t)
#elif defined(__ANDROID__)
// Android asserts do not log anywhere that gtest can currently inspect.
// So we expect exit, but cannot match the message.
#define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \
EXPECT_DEATH(expr, ".*")
#else
#define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \
EXPECT_DEATH_IF_SUPPORTED(expr, text)
#endif
#endif // ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_
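// Usage sketch (illustrative; hypothetical test): with exceptions enabled
// the macro expands to EXPECT_THROW, while a -fno-exceptions build turns it
// into a death-test expectation instead.
#include <stdexcept>
#include <vector>
#include "absl/base/internal/exception_testing.h"
TEST(ExceptionTestingUsage, VectorAtOutOfRangeFails) {
  std::vector<int> v;
  ABSL_BASE_INTERNAL_EXPECT_FAIL(v.at(3), std::out_of_range, "out_of_range");
}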

View File

@@ -0,0 +1,49 @@
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef ABSL_BASE_INTERNAL_HIDE_PTR_H_
#define ABSL_BASE_INTERNAL_HIDE_PTR_H_
#include <cstdint>
namespace absl {
inline namespace lts_2019_08_08 {
namespace base_internal {
// Arbitrary value with high bits set. Xor'ing with it is unlikely
// to map one valid pointer to another valid pointer.
constexpr uintptr_t HideMask() {
return (uintptr_t{0xF03A5F7BU} << (sizeof(uintptr_t) - 4) * 8) | 0xF03A5F7BU;
}
// Hide a pointer from the leak checker. For internal use only.
// Differs from absl::IgnoreLeak(ptr) in that absl::IgnoreLeak(ptr) causes ptr
// and all objects reachable from ptr to be ignored by the leak checker.
template <class T>
inline uintptr_t HidePtr(T* ptr) {
return reinterpret_cast<uintptr_t>(ptr) ^ HideMask();
}
// Return a pointer that has been hidden from the leak checker.
// For internal use only.
template <class T>
inline T* UnhidePtr(uintptr_t hidden) {
return reinterpret_cast<T*>(hidden ^ HideMask());
}
} // namespace base_internal
} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_BASE_INTERNAL_HIDE_PTR_H_
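// Usage sketch (illustrative): a HidePtr/UnhidePtr round trip. The hidden
// value is a plain integer, so the leak checker no longer sees a reference
// to the allocation. `Node` and the helper names are hypothetical.
#include <cstdint>
#include "absl/base/internal/hide_ptr.h"
struct Node;
inline uintptr_t StashNode(Node* n) { return absl::base_internal::HidePtr(n); }
inline Node* UnstashNode(uintptr_t hidden) {
  return absl::base_internal::UnhidePtr<Node>(hidden);
}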

View File

@@ -0,0 +1,35 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef ABSL_BASE_INTERNAL_IDENTITY_H_
#define ABSL_BASE_INTERNAL_IDENTITY_H_
namespace absl {
inline namespace lts_2019_08_08 {
namespace internal {
template <typename T>
struct identity {
typedef T type;
};
template <typename T>
using identity_t = typename identity<T>::type;
} // namespace internal
} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_BASE_INTERNAL_IDENTITY_H_
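// Usage sketch (illustrative): identity_t places a parameter in a
// non-deduced context, so only the first argument drives deduction.
// The function name is hypothetical.
#include "absl/base/internal/identity.h"
template <typename T>
T Clamp(T value, absl::internal::identity_t<T> lo,
        absl::internal::identity_t<T> hi) {
  return value < lo ? lo : (hi < value ? hi : value);
}
// Clamp(3.5, 0, 10) deduces T = double; the int bounds simply convert.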

View File

@@ -0,0 +1,107 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef ABSL_BASE_INTERNAL_INLINE_VARIABLE_EMULATION_H_
#define ABSL_BASE_INTERNAL_INLINE_VARIABLE_EMULATION_H_
#include <type_traits>
#include "absl/base/internal/identity.h"
// File:
// This file defines a macro that allows the creation of, or emulation of,
// C++17 inline variables based on whether or not the feature is supported.
////////////////////////////////////////////////////////////////////////////////
// Macro: ABSL_INTERNAL_INLINE_CONSTEXPR(type, name, init)
//
// Description:
// Expands to the equivalent of an inline constexpr instance of the specified
// `type` and `name`, initialized to the value `init`. If the compiler being
// used is detected as supporting actual inline variables as a language
// feature, then the macro expands to an actual inline variable definition.
//
// Requires:
// `type` is a type that is usable in an extern variable declaration.
//
// Requires: `name` is a valid identifier
//
// Requires:
// `init` is an expression that can be used in the following definition:
// constexpr type name = init;
//
// Usage:
//
// // Equivalent to: `inline constexpr size_t variant_npos = -1;`
// ABSL_INTERNAL_INLINE_CONSTEXPR(size_t, variant_npos, -1);
//
// Differences in implementation:
// For a direct, language-level inline variable, decltype(name) will be the
// type that was specified along with const qualification, whereas for
// emulated inline variables, decltype(name) may be different (in practice
// it will likely be a reference type).
////////////////////////////////////////////////////////////////////////////////
#ifdef __cpp_inline_variables
// Clang's -Wmissing-variable-declarations option erroneously warned that
// inline constexpr objects need to be pre-declared. This has now been fixed,
// but we will need to support this workaround for people building with older
// versions of clang.
//
// Bug: https://bugs.llvm.org/show_bug.cgi?id=35862
//
// Note:
// identity_t is used here so that the const and name are in the
// appropriate place for pointer types, reference types, function pointer
// types, etc..
#if defined(__clang__)
#define ABSL_INTERNAL_EXTERN_DECL(type, name) \
extern const ::absl::internal::identity_t<type> name;
#else // Otherwise, just define the macro to do nothing.
#define ABSL_INTERNAL_EXTERN_DECL(type, name)
#endif // defined(__clang__)
// See above comment at top of file for details.
#define ABSL_INTERNAL_INLINE_CONSTEXPR(type, name, init) \
ABSL_INTERNAL_EXTERN_DECL(type, name) \
inline constexpr ::absl::internal::identity_t<type> name = init
#else
// See above comment at top of file for details.
//
// Note:
// identity_t is used here so that the const and name are in the
// appropriate place for pointer types, reference types, function pointer
// types, etc..
#define ABSL_INTERNAL_INLINE_CONSTEXPR(var_type, name, init) \
template <class /*AbslInternalDummy*/ = void> \
struct AbslInternalInlineVariableHolder##name { \
static constexpr ::absl::internal::identity_t<var_type> kInstance = init; \
}; \
\
template <class AbslInternalDummy> \
constexpr ::absl::internal::identity_t<var_type> \
AbslInternalInlineVariableHolder##name<AbslInternalDummy>::kInstance; \
\
static constexpr const ::absl::internal::identity_t<var_type>& \
name = /* NOLINT */ \
AbslInternalInlineVariableHolder##name<>::kInstance; \
static_assert(sizeof(void (*)(decltype(name))) != 0, \
"Silence unused variable warnings.")
#endif // __cpp_inline_variables
#endif // ABSL_BASE_INTERNAL_INLINE_VARIABLE_EMULATION_H_
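// Usage sketch (illustrative): declaring a header-safe constant with the
// macro. Under __cpp_inline_variables this is a true inline variable;
// otherwise the emulation above yields a single shared instance. The
// constant name is hypothetical.
#include <cstddef>
#include "absl/base/internal/inline_variable.h"
ABSL_INTERNAL_INLINE_CONSTEXPR(size_t, kMaxNameLength, 128);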

View File

@@ -0,0 +1,46 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef ABSL_BASE_INLINE_VARIABLE_TESTING_H_
#define ABSL_BASE_INLINE_VARIABLE_TESTING_H_
#include "absl/base/internal/inline_variable.h"
namespace absl {
inline namespace lts_2019_08_08 {
namespace inline_variable_testing_internal {
struct Foo {
int value = 5;
};
ABSL_INTERNAL_INLINE_CONSTEXPR(Foo, inline_variable_foo, {});
ABSL_INTERNAL_INLINE_CONSTEXPR(Foo, other_inline_variable_foo, {});
ABSL_INTERNAL_INLINE_CONSTEXPR(int, inline_variable_int, 5);
ABSL_INTERNAL_INLINE_CONSTEXPR(int, other_inline_variable_int, 5);
ABSL_INTERNAL_INLINE_CONSTEXPR(void(*)(), inline_variable_fun_ptr, nullptr);
const Foo& get_foo_a();
const Foo& get_foo_b();
const int& get_int_a();
const int& get_int_b();
} // namespace inline_variable_testing_internal
} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_BASE_INLINE_VARIABLE_TESTING_H_

View File

@@ -0,0 +1,187 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// absl::base_internal::Invoke(f, args...) is an implementation of
// INVOKE(f, args...) from section [func.require] of the C++ standard.
//
// [func.require]
// Define INVOKE (f, t1, t2, ..., tN) as follows:
// 1. (t1.*f)(t2, ..., tN) when f is a pointer to a member function of a class T
// and t1 is an object of type T or a reference to an object of type T or a
// reference to an object of a type derived from T;
// 2. ((*t1).*f)(t2, ..., tN) when f is a pointer to a member function of a
// class T and t1 is not one of the types described in the previous item;
// 3. t1.*f when N == 1 and f is a pointer to member data of a class T and t1 is
// an object of type T or a reference to an object of type T or a reference
// to an object of a type derived from T;
// 4. (*t1).*f when N == 1 and f is a pointer to member data of a class T and t1
// is not one of the types described in the previous item;
// 5. f(t1, t2, ..., tN) in all other cases.
//
// The implementation is SFINAE-friendly: substitution failure within Invoke()
// isn't an error.
#ifndef ABSL_BASE_INTERNAL_INVOKE_H_
#define ABSL_BASE_INTERNAL_INVOKE_H_
#include <algorithm>
#include <type_traits>
#include <utility>
#include "absl/meta/type_traits.h"
// The following code is internal implementation detail. See the comment at the
// top of this file for the API documentation.
namespace absl {
inline namespace lts_2019_08_08 {
namespace base_internal {
// The five classes below each implement one of the clauses from the definition
// of INVOKE. The inner class template Accept<F, Args...> checks whether the
// clause is applicable; static function template Invoke(f, args...) does the
// invocation.
//
// By separating the clause selection logic from invocation we make sure that
// Invoke() does exactly what the standard says.
template <typename Derived>
struct StrippedAccept {
template <typename... Args>
struct Accept : Derived::template AcceptImpl<typename std::remove_cv<
typename std::remove_reference<Args>::type>::type...> {};
};
// (t1.*f)(t2, ..., tN) when f is a pointer to a member function of a class T
// and t1 is an object of type T or a reference to an object of type T or a
// reference to an object of a type derived from T.
struct MemFunAndRef : StrippedAccept<MemFunAndRef> {
template <typename... Args>
struct AcceptImpl : std::false_type {};
template <typename MemFunType, typename C, typename Obj, typename... Args>
struct AcceptImpl<MemFunType C::*, Obj, Args...>
: std::integral_constant<bool, std::is_base_of<C, Obj>::value &&
absl::is_function<MemFunType>::value> {
};
template <typename MemFun, typename Obj, typename... Args>
static decltype((std::declval<Obj>().*
std::declval<MemFun>())(std::declval<Args>()...))
Invoke(MemFun&& mem_fun, Obj&& obj, Args&&... args) {
return (std::forward<Obj>(obj).*
std::forward<MemFun>(mem_fun))(std::forward<Args>(args)...);
}
};
// ((*t1).*f)(t2, ..., tN) when f is a pointer to a member function of a
// class T and t1 is not one of the types described in the previous item.
struct MemFunAndPtr : StrippedAccept<MemFunAndPtr> {
template <typename... Args>
struct AcceptImpl : std::false_type {};
template <typename MemFunType, typename C, typename Ptr, typename... Args>
struct AcceptImpl<MemFunType C::*, Ptr, Args...>
: std::integral_constant<bool, !std::is_base_of<C, Ptr>::value &&
absl::is_function<MemFunType>::value> {
};
template <typename MemFun, typename Ptr, typename... Args>
static decltype(((*std::declval<Ptr>()).*
std::declval<MemFun>())(std::declval<Args>()...))
Invoke(MemFun&& mem_fun, Ptr&& ptr, Args&&... args) {
return ((*std::forward<Ptr>(ptr)).*
std::forward<MemFun>(mem_fun))(std::forward<Args>(args)...);
}
};
// t1.*f when N == 1 and f is a pointer to member data of a class T and t1 is
// an object of type T or a reference to an object of type T or a reference
// to an object of a type derived from T.
struct DataMemAndRef : StrippedAccept<DataMemAndRef> {
template <typename... Args>
struct AcceptImpl : std::false_type {};
template <typename R, typename C, typename Obj>
struct AcceptImpl<R C::*, Obj>
: std::integral_constant<bool, std::is_base_of<C, Obj>::value &&
!absl::is_function<R>::value> {};
template <typename DataMem, typename Ref>
static decltype(std::declval<Ref>().*std::declval<DataMem>()) Invoke(
DataMem&& data_mem, Ref&& ref) {
return std::forward<Ref>(ref).*std::forward<DataMem>(data_mem);
}
};
// (*t1).*f when N == 1 and f is a pointer to member data of a class T and t1
// is not one of the types described in the previous item.
struct DataMemAndPtr : StrippedAccept<DataMemAndPtr> {
template <typename... Args>
struct AcceptImpl : std::false_type {};
template <typename R, typename C, typename Ptr>
struct AcceptImpl<R C::*, Ptr>
: std::integral_constant<bool, !std::is_base_of<C, Ptr>::value &&
!absl::is_function<R>::value> {};
template <typename DataMem, typename Ptr>
static decltype((*std::declval<Ptr>()).*std::declval<DataMem>()) Invoke(
DataMem&& data_mem, Ptr&& ptr) {
return (*std::forward<Ptr>(ptr)).*std::forward<DataMem>(data_mem);
}
};
// f(t1, t2, ..., tN) in all other cases.
struct Callable {
// Callable doesn't have Accept because it's the last clause that gets picked
// when none of the previous clauses are applicable.
template <typename F, typename... Args>
static decltype(std::declval<F>()(std::declval<Args>()...)) Invoke(
F&& f, Args&&... args) {
return std::forward<F>(f)(std::forward<Args>(args)...);
}
};
// Resolves to the first matching clause.
template <typename... Args>
struct Invoker {
typedef typename std::conditional<
MemFunAndRef::Accept<Args...>::value, MemFunAndRef,
typename std::conditional<
MemFunAndPtr::Accept<Args...>::value, MemFunAndPtr,
typename std::conditional<
DataMemAndRef::Accept<Args...>::value, DataMemAndRef,
typename std::conditional<DataMemAndPtr::Accept<Args...>::value,
DataMemAndPtr, Callable>::type>::type>::
type>::type type;
};
// The result type of Invoke<F, Args...>.
template <typename F, typename... Args>
using InvokeT = decltype(Invoker<F, Args...>::type::Invoke(
std::declval<F>(), std::declval<Args>()...));
// Invoke(f, args...) is an implementation of INVOKE(f, args...) from section
// [func.require] of the C++ standard.
template <typename F, typename... Args>
InvokeT<F, Args...> Invoke(F&& f, Args&&... args) {
return Invoker<F, Args...>::type::Invoke(std::forward<F>(f),
std::forward<Args>(args)...);
}
} // namespace base_internal
} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_BASE_INTERNAL_INVOKE_H_
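// Usage sketch (illustrative): one entry point covers the INVOKE clauses.
// `Accumulator` and the demo function are hypothetical.
#include "absl/base/internal/invoke.h"
struct Accumulator {
  int total = 0;
  int Add(int x) { return total += x; }
};
inline int InvokeDemo() {
  Accumulator acc;
  int a = absl::base_internal::Invoke(&Accumulator::Add, acc, 5);   // clause 1
  int b = absl::base_internal::Invoke(&Accumulator::Add, &acc, 7);  // clause 2
  int c = absl::base_internal::Invoke(&Accumulator::total, acc);    // clause 3
  return a + b + c;  // 5 + 12 + 12 = 29
}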

View File

@@ -0,0 +1,621 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// A low-level allocator that can be used by other low-level
// modules without introducing dependency cycles.
// This allocator is slow and wasteful of memory;
// it should not be used when performance is key.
#include "absl/base/internal/low_level_alloc.h"
#include <type_traits>
#include "absl/base/call_once.h"
#include "absl/base/config.h"
#include "absl/base/internal/direct_mmap.h"
#include "absl/base/internal/scheduling_mode.h"
#include "absl/base/macros.h"
#include "absl/base/thread_annotations.h"
// LowLevelAlloc requires that the platform support low-level
// allocation of virtual memory. Platforms lacking this cannot use
// LowLevelAlloc.
#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING
#ifndef _WIN32
#include <pthread.h>
#include <signal.h>
#include <sys/mman.h>
#include <unistd.h>
#else
#include <windows.h>
#endif
#include <string.h>
#include <algorithm>
#include <atomic>
#include <cerrno>
#include <cstddef>
#include <new> // for placement-new
#include "absl/base/dynamic_annotations.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/spinlock.h"
// MAP_ANONYMOUS
#if defined(__APPLE__)
// For mmap, Linux defines both MAP_ANONYMOUS and MAP_ANON and says MAP_ANON is
// deprecated. In Darwin, MAP_ANON is all there is.
#if !defined MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif // !MAP_ANONYMOUS
#endif // __APPLE__
namespace absl {
inline namespace lts_2019_08_08 {
namespace base_internal {
// A first-fit allocator with amortized logarithmic free() time.
// ---------------------------------------------------------------------------
static const int kMaxLevel = 30;
namespace {
// This struct describes one allocated block, or one free block.
struct AllocList {
struct Header {
// Size of entire region, including this field. Must be
// first. Valid in both allocated and unallocated blocks.
uintptr_t size;
// kMagicAllocated or kMagicUnallocated xor this.
uintptr_t magic;
// Pointer to parent arena.
LowLevelAlloc::Arena *arena;
// Aligns regions to 0 mod 2*sizeof(void*).
void *dummy_for_alignment;
} header;
// Next two fields: in unallocated blocks: freelist skiplist data
// in allocated blocks: overlaps with client data
// Levels in skiplist used.
int levels;
// Actually has levels elements. The AllocList node may not have room
// for all kMaxLevel entries. See max_fit in LLA_SkiplistLevels().
AllocList *next[kMaxLevel];
};
} // namespace
// ---------------------------------------------------------------------------
// A trivial skiplist implementation. This is used to keep the freelist
// in address order while taking only logarithmic time per insert and delete.
// An integer approximation of log2(size/base)
// Requires size >= base.
static int IntLog2(size_t size, size_t base) {
int result = 0;
for (size_t i = size; i > base; i >>= 1) { // i == floor(size/2**result)
result++;
}
// floor(size / 2**result) <= base < floor(size / 2**(result-1))
// => log2(size/(base+1)) <= result < 1+log2(size/base)
// => result ~= log2(size/base)
return result;
}
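// Worked example (illustrative): IntLog2(4096, 16) halves 4096 eight times
// before reaching 16, so it returns 8, matching log2(4096/16) = 8.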
// Return a random integer n: p(n)=1/(2**n) if 1 <= n; p(n)=0 if n < 1.
static int Random(uint32_t *state) {
uint32_t r = *state;
int result = 1;
while ((((r = r*1103515245 + 12345) >> 30) & 1) == 0) {
result++;
}
*state = r;
return result;
}
// Return a number of skiplist levels for a node of size bytes, where
// base is the minimum node size. Compute level=log2(size / base)+n
// where n is 1 if random is false and otherwise a random number generated with
// the standard distribution for a skiplist: See Random() above.
// Bigger nodes tend to have more skiplist levels due to the log2(size / base)
// term, so first-fit searches touch fewer nodes. "level" is clipped so
// level<kMaxLevel and next[level-1] will fit in the node.
// 0 < LLA_SkiplistLevels(x,y,false) <= LLA_SkiplistLevels(x,y,true) < kMaxLevel
static int LLA_SkiplistLevels(size_t size, size_t base, uint32_t *random) {
// max_fit is the maximum number of levels that will fit in a node for the
// given size. We can't return more than max_fit, no matter what the
// random number generator says.
size_t max_fit = (size - offsetof(AllocList, next)) / sizeof(AllocList *);
int level = IntLog2(size, base) + (random != nullptr ? Random(random) : 1);
if (static_cast<size_t>(level) > max_fit) level = static_cast<int>(max_fit);
if (level > kMaxLevel - 1) level = kMaxLevel - 1;
ABSL_RAW_CHECK(level >= 1, "block not big enough for even one level");
return level;
}
// Return "atleast", the first element of AllocList *head s.t. *atleast >= *e.
// For 0 <= i < head->levels, set prev[i] to "no_greater", where no_greater
// points to the last element at level i in the AllocList less than *e, or is
// head if no such element exists.
static AllocList *LLA_SkiplistSearch(AllocList *head,
AllocList *e, AllocList **prev) {
AllocList *p = head;
for (int level = head->levels - 1; level >= 0; level--) {
for (AllocList *n; (n = p->next[level]) != nullptr && n < e; p = n) {
}
prev[level] = p;
}
return (head->levels == 0) ? nullptr : prev[0]->next[0];
}
// Insert element *e into AllocList *head. Set prev[] as LLA_SkiplistSearch.
// Requires that e->levels be previously set by the caller (using
// LLA_SkiplistLevels())
static void LLA_SkiplistInsert(AllocList *head, AllocList *e,
AllocList **prev) {
LLA_SkiplistSearch(head, e, prev);
for (; head->levels < e->levels; head->levels++) { // extend prev pointers
prev[head->levels] = head; // to all *e's levels
}
for (int i = 0; i != e->levels; i++) { // add element to list
e->next[i] = prev[i]->next[i];
prev[i]->next[i] = e;
}
}
// Remove element *e from AllocList *head. Set prev[] as LLA_SkiplistSearch().
// Requires that e->levels be previously set by the caller (using
// LLA_SkiplistLevels())
static void LLA_SkiplistDelete(AllocList *head, AllocList *e,
AllocList **prev) {
AllocList *found = LLA_SkiplistSearch(head, e, prev);
ABSL_RAW_CHECK(e == found, "element not in freelist");
for (int i = 0; i != e->levels && prev[i]->next[i] == e; i++) {
prev[i]->next[i] = e->next[i];
}
while (head->levels > 0 && head->next[head->levels - 1] == nullptr) {
head->levels--; // reduce head->levels if level unused
}
}
// ---------------------------------------------------------------------------
// Arena implementation
// Metadata for an LowLevelAlloc arena instance.
struct LowLevelAlloc::Arena {
// Constructs an arena with the given LowLevelAlloc flags.
explicit Arena(uint32_t flags_value);
base_internal::SpinLock mu;
// Head of free list, sorted by address
AllocList freelist GUARDED_BY(mu);
// Count of allocated blocks
int32_t allocation_count GUARDED_BY(mu);
// flags passed to NewArena
const uint32_t flags;
// Result of sysconf(_SC_PAGESIZE)
const size_t pagesize;
// Lowest power of two >= max(16, sizeof(AllocList))
const size_t roundup;
// Smallest allocation block size
const size_t min_size;
// PRNG state
uint32_t random GUARDED_BY(mu);
};
namespace {
using ArenaStorage = std::aligned_storage<sizeof(LowLevelAlloc::Arena),
alignof(LowLevelAlloc::Arena)>::type;
// Static storage space for the lazily-constructed, default global arena
// instances. We require this space because the whole point of LowLevelAlloc
// is to avoid relying on malloc/new.
ArenaStorage default_arena_storage;
ArenaStorage unhooked_arena_storage;
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
ArenaStorage unhooked_async_sig_safe_arena_storage;
#endif
// We must use LowLevelCallOnce here to construct the global arenas, rather than
// using function-level statics, to avoid recursively invoking the scheduler.
absl::once_flag create_globals_once;
void CreateGlobalArenas() {
new (&default_arena_storage)
LowLevelAlloc::Arena(LowLevelAlloc::kCallMallocHook);
new (&unhooked_arena_storage) LowLevelAlloc::Arena(0);
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
new (&unhooked_async_sig_safe_arena_storage)
LowLevelAlloc::Arena(LowLevelAlloc::kAsyncSignalSafe);
#endif
}
// Returns a global arena that does not call into hooks. Used by NewArena()
// when kCallMallocHook is not set.
LowLevelAlloc::Arena* UnhookedArena() {
base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
return reinterpret_cast<LowLevelAlloc::Arena*>(&unhooked_arena_storage);
}
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
// Returns a global arena that is async-signal safe. Used by NewArena() when
// kAsyncSignalSafe is set.
LowLevelAlloc::Arena *UnhookedAsyncSigSafeArena() {
base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
return reinterpret_cast<LowLevelAlloc::Arena *>(
&unhooked_async_sig_safe_arena_storage);
}
#endif
} // namespace
// Returns the default arena, as used by LowLevelAlloc::Alloc() and friends.
LowLevelAlloc::Arena *LowLevelAlloc::DefaultArena() {
base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
return reinterpret_cast<LowLevelAlloc::Arena*>(&default_arena_storage);
}
// magic numbers to identify allocated and unallocated blocks
static const uintptr_t kMagicAllocated = 0x4c833e95U;
static const uintptr_t kMagicUnallocated = ~kMagicAllocated;
namespace {
class SCOPED_LOCKABLE ArenaLock {
public:
explicit ArenaLock(LowLevelAlloc::Arena *arena)
EXCLUSIVE_LOCK_FUNCTION(arena->mu)
: arena_(arena) {
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
sigset_t all;
sigfillset(&all);
mask_valid_ = pthread_sigmask(SIG_BLOCK, &all, &mask_) == 0;
}
#endif
arena_->mu.Lock();
}
~ArenaLock() { ABSL_RAW_CHECK(left_, "haven't left Arena region"); }
void Leave() UNLOCK_FUNCTION() {
arena_->mu.Unlock();
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
if (mask_valid_) {
const int err = pthread_sigmask(SIG_SETMASK, &mask_, nullptr);
if (err != 0) {
ABSL_RAW_LOG(FATAL, "pthread_sigmask failed: %d", err);
}
}
#endif
left_ = true;
}
private:
bool left_ = false; // whether left region
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
bool mask_valid_ = false;
sigset_t mask_; // old mask of blocked signals
#endif
LowLevelAlloc::Arena *arena_;
ArenaLock(const ArenaLock &) = delete;
ArenaLock &operator=(const ArenaLock &) = delete;
};
} // namespace
// create an appropriate magic number for an object at "ptr"
// "magic" should be kMagicAllocated or kMagicUnallocated
inline static uintptr_t Magic(uintptr_t magic, AllocList::Header *ptr) {
return magic ^ reinterpret_cast<uintptr_t>(ptr);
}
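// Note: XOR-ing the magic constant with the header's own address makes the
// stored value location-dependent, so a stale, corrupted, or misaddressed
// header fails its magic check even if the raw constant survives in memory.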
namespace {
size_t GetPageSize() {
#ifdef _WIN32
SYSTEM_INFO system_info;
GetSystemInfo(&system_info);
return std::max(system_info.dwPageSize, system_info.dwAllocationGranularity);
#elif defined(__wasm__) || defined(__asmjs__)
return getpagesize();
#else
return sysconf(_SC_PAGESIZE);
#endif
}
size_t RoundedUpBlockSize() {
// Round up block sizes to a power of two close to the header size.
size_t roundup = 16;
while (roundup < sizeof(AllocList::Header)) {
roundup += roundup;
}
return roundup;
}
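// For example, if sizeof(AllocList::Header) were 24 bytes, the loop would
// double 16 once and return roundup == 32.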
} // namespace
LowLevelAlloc::Arena::Arena(uint32_t flags_value)
: mu(base_internal::SCHEDULE_KERNEL_ONLY),
allocation_count(0),
flags(flags_value),
pagesize(GetPageSize()),
roundup(RoundedUpBlockSize()),
min_size(2 * roundup),
random(0) {
freelist.header.size = 0;
freelist.header.magic =
Magic(kMagicUnallocated, &freelist.header);
freelist.header.arena = this;
freelist.levels = 0;
memset(freelist.next, 0, sizeof(freelist.next));
}
// L < meta_data_arena->mu
LowLevelAlloc::Arena *LowLevelAlloc::NewArena(int32_t flags) {
Arena *meta_data_arena = DefaultArena();
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
meta_data_arena = UnhookedAsyncSigSafeArena();
} else // NOLINT(readability/braces)
#endif
if ((flags & LowLevelAlloc::kCallMallocHook) == 0) {
meta_data_arena = UnhookedArena();
}
Arena *result =
new (AllocWithArena(sizeof (*result), meta_data_arena)) Arena(flags);
return result;
}
// L < arena->mu, L < arena->arena->mu
bool LowLevelAlloc::DeleteArena(Arena *arena) {
ABSL_RAW_CHECK(
arena != nullptr && arena != DefaultArena() && arena != UnhookedArena(),
"may not delete default arena");
ArenaLock section(arena);
if (arena->allocation_count != 0) {
section.Leave();
return false;
}
while (arena->freelist.next[0] != nullptr) {
AllocList *region = arena->freelist.next[0];
size_t size = region->header.size;
arena->freelist.next[0] = region->next[0];
ABSL_RAW_CHECK(
region->header.magic == Magic(kMagicUnallocated, &region->header),
"bad magic number in DeleteArena()");
ABSL_RAW_CHECK(region->header.arena == arena,
"bad arena pointer in DeleteArena()");
ABSL_RAW_CHECK(size % arena->pagesize == 0,
"empty arena has non-page-aligned block size");
ABSL_RAW_CHECK(reinterpret_cast<uintptr_t>(region) % arena->pagesize == 0,
"empty arena has non-page-aligned block");
int munmap_result;
#ifdef _WIN32
munmap_result = VirtualFree(region, 0, MEM_RELEASE);
ABSL_RAW_CHECK(munmap_result != 0,
"LowLevelAlloc::DeleteArena: VitualFree failed");
#else
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) == 0) {
munmap_result = munmap(region, size);
} else {
munmap_result = base_internal::DirectMunmap(region, size);
}
#else
munmap_result = munmap(region, size);
#endif // ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
if (munmap_result != 0) {
ABSL_RAW_LOG(FATAL, "LowLevelAlloc::DeleteArena: munmap failed: %d",
errno);
}
#endif // _WIN32
}
section.Leave();
arena->~Arena();
Free(arena);
return true;
}
// ---------------------------------------------------------------------------
// Addition, checking for overflow. The intent is to die if an external client
// manages to push through a request that would cause arithmetic to fail.
static inline uintptr_t CheckedAdd(uintptr_t a, uintptr_t b) {
uintptr_t sum = a + b;
ABSL_RAW_CHECK(sum >= a, "LowLevelAlloc arithmetic overflow");
return sum;
}
// Return value rounded up to next multiple of align.
// align must be a power of two.
static inline uintptr_t RoundUp(uintptr_t addr, uintptr_t align) {
return CheckedAdd(addr, align - 1) & ~(align - 1);
}
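// For example, RoundUp(13, 8) == 16 and RoundUp(16, 8) == 16.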
// Equivalent to "return prev->next[i]" but with sanity checking
// that the freelist is in the correct order, that it
// consists of regions marked "unallocated", and that no two regions
// are adjacent in memory (they should have been coalesced).
// L < arena->mu
static AllocList *Next(int i, AllocList *prev, LowLevelAlloc::Arena *arena) {
ABSL_RAW_CHECK(i < prev->levels, "too few levels in Next()");
AllocList *next = prev->next[i];
if (next != nullptr) {
ABSL_RAW_CHECK(
next->header.magic == Magic(kMagicUnallocated, &next->header),
"bad magic number in Next()");
ABSL_RAW_CHECK(next->header.arena == arena, "bad arena pointer in Next()");
if (prev != &arena->freelist) {
ABSL_RAW_CHECK(prev < next, "unordered freelist");
ABSL_RAW_CHECK(reinterpret_cast<char *>(prev) + prev->header.size <
reinterpret_cast<char *>(next),
"malformed freelist");
}
}
return next;
}
// Coalesce list item "a" with its successor if they are adjacent.
static void Coalesce(AllocList *a) {
AllocList *n = a->next[0];
if (n != nullptr && reinterpret_cast<char *>(a) + a->header.size ==
reinterpret_cast<char *>(n)) {
LowLevelAlloc::Arena *arena = a->header.arena;
a->header.size += n->header.size;
n->header.magic = 0;
n->header.arena = nullptr;
AllocList *prev[kMaxLevel];
LLA_SkiplistDelete(&arena->freelist, n, prev);
LLA_SkiplistDelete(&arena->freelist, a, prev);
a->levels = LLA_SkiplistLevels(a->header.size, arena->min_size,
&arena->random);
LLA_SkiplistInsert(&arena->freelist, a, prev);
}
}
// Adds block at location "v" to the free list
// L >= arena->mu
static void AddToFreelist(void *v, LowLevelAlloc::Arena *arena) {
AllocList *f = reinterpret_cast<AllocList *>(
reinterpret_cast<char *>(v) - sizeof (f->header));
ABSL_RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
"bad magic number in AddToFreelist()");
ABSL_RAW_CHECK(f->header.arena == arena,
"bad arena pointer in AddToFreelist()");
f->levels = LLA_SkiplistLevels(f->header.size, arena->min_size,
&arena->random);
AllocList *prev[kMaxLevel];
LLA_SkiplistInsert(&arena->freelist, f, prev);
f->header.magic = Magic(kMagicUnallocated, &f->header);
Coalesce(f); // maybe coalesce with successor
Coalesce(prev[0]); // maybe coalesce with predecessor
}
// Frees storage allocated by LowLevelAlloc::Alloc().
// L < arena->mu
void LowLevelAlloc::Free(void *v) {
if (v != nullptr) {
AllocList *f = reinterpret_cast<AllocList *>(
reinterpret_cast<char *>(v) - sizeof (f->header));
ABSL_RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
"bad magic number in Free()");
LowLevelAlloc::Arena *arena = f->header.arena;
ArenaLock section(arena);
AddToFreelist(v, arena);
ABSL_RAW_CHECK(arena->allocation_count > 0, "nothing in arena to free");
arena->allocation_count--;
section.Leave();
}
}
// Allocates and returns a block of "request" bytes, to be freed with Free().
// L < arena->mu
static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
void *result = nullptr;
if (request != 0) {
AllocList *s; // will point to region that satisfies request
ArenaLock section(arena);
// round up with header
size_t req_rnd = RoundUp(CheckedAdd(request, sizeof (s->header)),
arena->roundup);
for (;;) { // loop until we find a suitable region
// find the minimum levels that a block of this size must have
int i = LLA_SkiplistLevels(req_rnd, arena->min_size, nullptr) - 1;
if (i < arena->freelist.levels) { // potential blocks exist
AllocList *before = &arena->freelist; // predecessor of s
while ((s = Next(i, before, arena)) != nullptr &&
s->header.size < req_rnd) {
before = s;
}
if (s != nullptr) { // we found a region
break;
}
}
// we unlock before mmap() both because mmap() may call a callback hook,
// and because it may be slow.
arena->mu.Unlock();
// mmap generous 64K chunks to decrease
// the chances/impact of fragmentation:
size_t new_pages_size = RoundUp(req_rnd, arena->pagesize * 16);
void *new_pages;
#ifdef _WIN32
new_pages = VirtualAlloc(0, new_pages_size,
MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
ABSL_RAW_CHECK(new_pages != nullptr, "VirtualAlloc failed");
#else
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
new_pages = base_internal::DirectMmap(nullptr, new_pages_size,
PROT_WRITE|PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
} else {
new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ,
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
}
#else
new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ,
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
#endif // ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
if (new_pages == MAP_FAILED) {
ABSL_RAW_LOG(FATAL, "mmap error: %d", errno);
}
#endif // _WIN32
arena->mu.Lock();
s = reinterpret_cast<AllocList *>(new_pages);
s->header.size = new_pages_size;
// Pretend the block is allocated; call AddToFreelist() to free it.
s->header.magic = Magic(kMagicAllocated, &s->header);
s->header.arena = arena;
AddToFreelist(&s->levels, arena); // insert new region into free list
}
AllocList *prev[kMaxLevel];
LLA_SkiplistDelete(&arena->freelist, s, prev); // remove from free list
// s points to the first free region that's big enough
if (CheckedAdd(req_rnd, arena->min_size) <= s->header.size) {
// big enough to split
AllocList *n = reinterpret_cast<AllocList *>
(req_rnd + reinterpret_cast<char *>(s));
n->header.size = s->header.size - req_rnd;
n->header.magic = Magic(kMagicAllocated, &n->header);
n->header.arena = arena;
s->header.size = req_rnd;
AddToFreelist(&n->levels, arena);
}
s->header.magic = Magic(kMagicAllocated, &s->header);
ABSL_RAW_CHECK(s->header.arena == arena, "");
arena->allocation_count++;
section.Leave();
result = &s->levels;
}
ANNOTATE_MEMORY_IS_UNINITIALIZED(result, request);
return result;
}
void *LowLevelAlloc::Alloc(size_t request) {
void *result = DoAllocWithArena(request, DefaultArena());
return result;
}
void *LowLevelAlloc::AllocWithArena(size_t request, Arena *arena) {
ABSL_RAW_CHECK(arena != nullptr, "must pass a valid arena");
void *result = DoAllocWithArena(request, arena);
return result;
}
} // namespace base_internal
} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_LOW_LEVEL_ALLOC_MISSING

@@ -0,0 +1,126 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_
#define ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_
// A simple thread-safe memory allocator that does not depend on
// mutexes or thread-specific data. It is intended to be used
// sparingly, and only when malloc() would introduce an unwanted
// dependency, such as inside the heap-checker, or the Mutex
// implementation.
// IWYU pragma: private, include "base/low_level_alloc.h"
#include <sys/types.h>
#include <cstdint>
#include "absl/base/attributes.h"
#include "absl/base/config.h"
// LowLevelAlloc requires that the platform support low-level
// allocation of virtual memory. Platforms lacking this cannot use
// LowLevelAlloc.
#ifdef ABSL_LOW_LEVEL_ALLOC_MISSING
#error ABSL_LOW_LEVEL_ALLOC_MISSING cannot be directly set
#elif !defined(ABSL_HAVE_MMAP) && !defined(_WIN32)
#define ABSL_LOW_LEVEL_ALLOC_MISSING 1
#endif
// Using LowLevelAlloc with kAsyncSignalSafe isn't supported on Windows or
// asm.js / WebAssembly.
// See https://kripken.github.io/emscripten-site/docs/porting/pthreads.html
// for more information.
#ifdef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
#error ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING cannot be directly set
#elif defined(_WIN32) || defined(__asmjs__) || defined(__wasm__)
#define ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING 1
#endif
#include <cstddef>
#include "absl/base/port.h"
namespace absl {
inline namespace lts_2019_08_08 {
namespace base_internal {
class LowLevelAlloc {
public:
struct Arena; // an arena from which memory may be allocated
// Returns a pointer to a block of at least "request" bytes
// that have been newly allocated from the specific arena.
  // For Alloc() calls, the DefaultArena() is used.
// Returns 0 if passed request==0.
// Does not return 0 under other circumstances; it crashes if memory
// is not available.
static void *Alloc(size_t request) ABSL_ATTRIBUTE_SECTION(malloc_hook);
static void *AllocWithArena(size_t request, Arena *arena)
ABSL_ATTRIBUTE_SECTION(malloc_hook);
// Deallocates a region of memory that was previously allocated with
// Alloc(). Does nothing if passed 0. "s" must be either 0,
// or must have been returned from a call to Alloc() and not yet passed to
// Free() since that call to Alloc(). The space is returned to the arena
// from which it was allocated.
static void Free(void *s) ABSL_ATTRIBUTE_SECTION(malloc_hook);
  // The ABSL_ATTRIBUTE_SECTION(malloc_hook) annotations on Alloc* and Free
  // place all callers of MallocHook::Invoke* in this module into a special
  // section, so that MallocHook::GetCallerStackTrace can function accurately.
// Create a new arena.
// The root metadata for the new arena is allocated in the
// meta_data_arena; the DefaultArena() can be passed for meta_data_arena.
// These values may be ored into flags:
enum {
// Report calls to Alloc() and Free() via the MallocHook interface.
// Set in the DefaultArena.
kCallMallocHook = 0x0001,
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
// Make calls to Alloc(), Free() be async-signal-safe. Not set in
// DefaultArena(). Not supported on all platforms.
kAsyncSignalSafe = 0x0002,
#endif
};
// Construct a new arena. The allocation of the underlying metadata honors
// the provided flags. For example, the call NewArena(kAsyncSignalSafe)
  // is itself async-signal-safe, as well as generating an arena that provides
// async-signal-safe Alloc/Free.
static Arena *NewArena(int32_t flags);
// Destroys an arena allocated by NewArena and returns true,
// provided no allocated blocks remain in the arena.
// If allocated blocks remain in the arena, does nothing and
// returns false.
// It is illegal to attempt to destroy the DefaultArena().
static bool DeleteArena(Arena *arena);
// The default arena that always exists.
static Arena *DefaultArena();
private:
LowLevelAlloc(); // no instances
};
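// Usage example (illustrative):
//
//   LowLevelAlloc::Arena *arena = LowLevelAlloc::NewArena(0);
//   void *block = LowLevelAlloc::AllocWithArena(128, arena);
//   // ... use the 128-byte block ...
//   LowLevelAlloc::Free(block);  // returns the block to its arena
//   ABSL_RAW_CHECK(LowLevelAlloc::DeleteArena(arena),
//                  "arena still has allocated blocks");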
} // namespace base_internal
} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_

@@ -0,0 +1,160 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/base/internal/low_level_alloc.h"
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <thread> // NOLINT(build/c++11)
#include <unordered_map>
#include <utility>
namespace absl {
inline namespace lts_2019_08_08 {
namespace base_internal {
namespace {
// This test doesn't use gtest since it needs to test that everything
// works before main().
#define TEST_ASSERT(x) \
if (!(x)) { \
printf("TEST_ASSERT(%s) FAILED ON LINE %d\n", #x, __LINE__); \
abort(); \
}
// a block of memory obtained from the allocator
struct BlockDesc {
char *ptr; // pointer to memory
int len; // number of bytes
int fill; // filled with data starting with this
};
// Check that the pattern placed in the block d
// by RandomizeBlockDesc is still there.
static void CheckBlockDesc(const BlockDesc &d) {
for (int i = 0; i != d.len; i++) {
TEST_ASSERT((d.ptr[i] & 0xff) == ((d.fill + i) & 0xff));
}
}
// Fill the block "*d" with a pattern
// starting with a random byte.
static void RandomizeBlockDesc(BlockDesc *d) {
d->fill = rand() & 0xff;
for (int i = 0; i != d->len; i++) {
d->ptr[i] = (d->fill + i) & 0xff;
}
}
// Used to indicate to the malloc hooks that
// this call is from LowLevelAlloc.
static bool using_low_level_alloc = false;
// n times, toss a coin, and based on the outcome
// either allocate a new block or deallocate an old block.
// New blocks are placed in a std::unordered_map with a random key
// and initialized with RandomizeBlockDesc().
// If keys conflict, the older block is freed.
// Old blocks are always checked with CheckBlockDesc()
// before being freed. At the end of the run,
// all remaining allocated blocks are freed.
// If use_new_arena is true, use a fresh arena, and then delete it.
// If call_malloc_hook is true and use_new_arena is true,
// allocations and deallocations are reported via the MallocHook
// interface.
static void Test(bool use_new_arena, bool call_malloc_hook, int n) {
typedef std::unordered_map<int, BlockDesc> AllocMap;
AllocMap allocated;
AllocMap::iterator it;
BlockDesc block_desc;
int rnd;
LowLevelAlloc::Arena *arena = 0;
if (use_new_arena) {
int32_t flags = call_malloc_hook ? LowLevelAlloc::kCallMallocHook : 0;
arena = LowLevelAlloc::NewArena(flags);
}
for (int i = 0; i != n; i++) {
if (i != 0 && i % 10000 == 0) {
printf(".");
fflush(stdout);
}
switch (rand() & 1) { // toss a coin
case 0: // coin came up heads: add a block
using_low_level_alloc = true;
block_desc.len = rand() & 0x3fff;
block_desc.ptr =
reinterpret_cast<char *>(
arena == 0
? LowLevelAlloc::Alloc(block_desc.len)
: LowLevelAlloc::AllocWithArena(block_desc.len, arena));
using_low_level_alloc = false;
RandomizeBlockDesc(&block_desc);
rnd = rand();
it = allocated.find(rnd);
if (it != allocated.end()) {
CheckBlockDesc(it->second);
using_low_level_alloc = true;
LowLevelAlloc::Free(it->second.ptr);
using_low_level_alloc = false;
it->second = block_desc;
} else {
allocated[rnd] = block_desc;
}
break;
case 1: // coin came up tails: remove a block
it = allocated.begin();
if (it != allocated.end()) {
CheckBlockDesc(it->second);
using_low_level_alloc = true;
LowLevelAlloc::Free(it->second.ptr);
using_low_level_alloc = false;
allocated.erase(it);
}
break;
}
}
// remove all remaining blocks
while ((it = allocated.begin()) != allocated.end()) {
CheckBlockDesc(it->second);
using_low_level_alloc = true;
LowLevelAlloc::Free(it->second.ptr);
using_low_level_alloc = false;
allocated.erase(it);
}
if (use_new_arena) {
TEST_ASSERT(LowLevelAlloc::DeleteArena(arena));
}
}
// LowLevelAlloc is designed to be safe to call before main().
static struct BeforeMain {
BeforeMain() {
Test(false, false, 50000);
Test(true, false, 50000);
Test(true, true, 50000);
}
} before_main;
} // namespace
} // namespace base_internal
} // inline namespace lts_2019_08_08
} // namespace absl
int main(int argc, char *argv[]) {
// The actual test runs in the global constructor of `before_main`.
printf("PASS\n");
return 0;
}

@@ -0,0 +1,107 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Core interfaces and definitions used by low-level interfaces such as
// SpinLock.
#ifndef ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
#define ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
#include "absl/base/internal/scheduling_mode.h"
#include "absl/base/macros.h"
// The following two declarations exist so SchedulingGuard may friend them with
// the appropriate language linkage. These callbacks allow libc internals, such
// as function level statics, to schedule cooperatively when locking.
extern "C" bool __google_disable_rescheduling(void);
extern "C" void __google_enable_rescheduling(bool disable_result);
namespace absl {
inline namespace lts_2019_08_08 {
namespace base_internal {
class SchedulingHelper; // To allow use of SchedulingGuard.
class SpinLock; // To allow use of SchedulingGuard.
// SchedulingGuard
// Provides guard semantics that may be used to disable cooperative rescheduling
// of the calling thread within specific program blocks. This is used to
// protect resources (e.g. low-level SpinLocks or Domain code) that cooperative
// scheduling depends on.
//
// Domain implementations capable of rescheduling in reaction to involuntary
// kernel thread actions (e.g blocking due to a pagefault or syscall) must
// guarantee that an annotated thread is not allowed to (cooperatively)
// reschedule until the annotated region is complete.
//
// It is an error to attempt to use a cooperatively scheduled resource (e.g.
// Mutex) within a rescheduling-disabled region.
//
// All methods are async-signal safe.
class SchedulingGuard {
public:
// Returns true iff the calling thread may be cooperatively rescheduled.
static bool ReschedulingIsAllowed();
private:
// Disable cooperative rescheduling of the calling thread. It may still
// initiate scheduling operations (e.g. wake-ups), however, it may not itself
// reschedule. Nestable. The returned result is opaque, clients should not
// attempt to interpret it.
// REQUIRES: Result must be passed to a pairing EnableScheduling().
static bool DisableRescheduling();
// Marks the end of a rescheduling disabled region, previously started by
// DisableRescheduling().
// REQUIRES: Pairs with innermost call (and result) of DisableRescheduling().
static void EnableRescheduling(bool disable_result);
// A scoped helper for {Disable, Enable}Rescheduling().
// REQUIRES: destructor must run in same thread as constructor.
struct ScopedDisable {
ScopedDisable() { disabled = SchedulingGuard::DisableRescheduling(); }
~ScopedDisable() { SchedulingGuard::EnableRescheduling(disabled); }
bool disabled;
};
// Access to SchedulingGuard is explicitly white-listed.
friend class SchedulingHelper;
friend class SpinLock;
SchedulingGuard(const SchedulingGuard&) = delete;
SchedulingGuard& operator=(const SchedulingGuard&) = delete;
};
//------------------------------------------------------------------------------
// End of public interfaces.
//------------------------------------------------------------------------------
inline bool SchedulingGuard::ReschedulingIsAllowed() {
return false;
}
inline bool SchedulingGuard::DisableRescheduling() {
return false;
}
inline void SchedulingGuard::EnableRescheduling(bool /* disable_result */) {
return;
}
} // namespace base_internal
} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_

@@ -0,0 +1,52 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_
#define ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_
// This header defines two macros:
//
// If the platform supports thread-local storage:
//
// * ABSL_PER_THREAD_TLS_KEYWORD is the C keyword needed to declare a
// thread-local variable
// * ABSL_PER_THREAD_TLS is 1
//
// Otherwise:
//
// * ABSL_PER_THREAD_TLS_KEYWORD is empty
// * ABSL_PER_THREAD_TLS is 0
//
// Microsoft C supports thread-local storage.
// GCC supports it if the appropriate version of glibc is available,
// which the programmer can indicate by defining ABSL_HAVE_TLS.
#include "absl/base/port.h" // For ABSL_HAVE_TLS
#if defined(ABSL_PER_THREAD_TLS)
#error ABSL_PER_THREAD_TLS cannot be directly set
#elif defined(ABSL_PER_THREAD_TLS_KEYWORD)
#error ABSL_PER_THREAD_TLS_KEYWORD cannot be directly set
#elif defined(ABSL_HAVE_TLS)
#define ABSL_PER_THREAD_TLS_KEYWORD __thread
#define ABSL_PER_THREAD_TLS 1
#elif defined(_MSC_VER)
#define ABSL_PER_THREAD_TLS_KEYWORD __declspec(thread)
#define ABSL_PER_THREAD_TLS 1
#else
#define ABSL_PER_THREAD_TLS_KEYWORD
#define ABSL_PER_THREAD_TLS 0
#endif
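// Usage example (illustrative):
//
//   #if ABSL_PER_THREAD_TLS
//   ABSL_PER_THREAD_TLS_KEYWORD static int per_thread_counter = 0;
//   #endif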
#endif // ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_

@@ -0,0 +1,33 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef ABSL_BASE_INTERNAL_PRETTY_FUNCTION_H_
#define ABSL_BASE_INTERNAL_PRETTY_FUNCTION_H_
// ABSL_PRETTY_FUNCTION
//
// In C++11, __func__ gives the undecorated name of the current function. That
// is, "main", not "int main()". Various compilers give extra macros to get the
// decorated function name, including return type and arguments, to
// differentiate between overload sets. ABSL_PRETTY_FUNCTION is a portable
// version of these macros which forwards to the correct macro on each compiler.
#if defined(_MSC_VER)
#define ABSL_PRETTY_FUNCTION __FUNCSIG__
#elif defined(__GNUC__)
#define ABSL_PRETTY_FUNCTION __PRETTY_FUNCTION__
#else
#error "Unsupported compiler"
#endif
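// Usage example (illustrative):
//
//   ABSL_RAW_LOG(INFO, "entering %s", ABSL_PRETTY_FUNCTION);
//
// On GCC and Clang this logs the full signature, e.g. "void MyClass::Run(int)".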
#endif // ABSL_BASE_INTERNAL_PRETTY_FUNCTION_H_

@@ -0,0 +1,237 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/base/internal/raw_logging.h"
#include <stddef.h>
#include <cstdarg>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/atomic_hook.h"
#include "absl/base/log_severity.h"
// We know how to perform low-level writes to stderr in POSIX and Windows. For
// these platforms, we define the token ABSL_LOW_LEVEL_WRITE_SUPPORTED.
// Much of raw_logging.cc becomes a no-op when we can't output messages,
// although a FATAL ABSL_RAW_LOG message will still abort the process.
// ABSL_HAVE_POSIX_WRITE is defined when the platform provides posix write()
// (as from unistd.h)
//
// This preprocessor token is also defined in raw_io.cc. If you need to copy
// this, consider moving both to config.h instead.
#if defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \
defined(__Fuchsia__) || defined(__native_client__) || \
defined(__EMSCRIPTEN__)
#include <unistd.h>
#define ABSL_HAVE_POSIX_WRITE 1
#define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1
#else
#undef ABSL_HAVE_POSIX_WRITE
#endif
// ABSL_HAVE_SYSCALL_WRITE is defined when the platform provides the syscall
// syscall(SYS_write, /*int*/ fd, /*char* */ buf, /*size_t*/ len);
// for low level operations that want to avoid libc.
#if (defined(__linux__) || defined(__FreeBSD__)) && !defined(__ANDROID__)
#include <sys/syscall.h>
#define ABSL_HAVE_SYSCALL_WRITE 1
#define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1
#else
#undef ABSL_HAVE_SYSCALL_WRITE
#endif
#ifdef _WIN32
#include <io.h>
#define ABSL_HAVE_RAW_IO 1
#define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1
#else
#undef ABSL_HAVE_RAW_IO
#endif
// TODO(gfalcon): We want raw-logging to work on as many platforms as possible.
// Explicitly #error out when not ABSL_LOW_LEVEL_WRITE_SUPPORTED, except for a
// whitelisted set of platforms for which we expect not to be able to raw log.
ABSL_CONST_INIT static absl::base_internal::AtomicHook<
absl::raw_logging_internal::LogPrefixHook> log_prefix_hook;
ABSL_CONST_INIT static absl::base_internal::AtomicHook<
absl::raw_logging_internal::AbortHook> abort_hook;
#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
static const char kTruncated[] = " ... (message truncated)\n";
// sprintf the format to the buffer, adjusting *buf and *size to reflect the
// consumed bytes, and return whether the message fit without truncation. If
// truncation occurred, leave room in the buffer, if possible, for the message
// kTruncated[].
inline static bool VADoRawLog(char** buf, int* size, const char* format,
va_list ap) ABSL_PRINTF_ATTRIBUTE(3, 0);
inline static bool VADoRawLog(char** buf, int* size,
const char* format, va_list ap) {
int n = vsnprintf(*buf, *size, format, ap);
bool result = true;
if (n < 0 || n > *size) {
result = false;
if (static_cast<size_t>(*size) > sizeof(kTruncated)) {
n = *size - sizeof(kTruncated); // room for truncation message
} else {
n = 0; // no room for truncation message
}
}
*size -= n;
*buf += n;
return result;
}
#endif // ABSL_LOW_LEVEL_WRITE_SUPPORTED
static constexpr int kLogBufSize = 3000;
namespace {
// CAVEAT: vsnprintf called from *DoRawLog below has some (exotic) code paths
// that invoke malloc() and getenv() that might acquire some locks.
// Helper for RawLog below.
// *DoRawLog writes to *buf of *size and moves them past the written portion.
// It returns true iff there was no overflow or error.
bool DoRawLog(char** buf, int* size, const char* format, ...)
ABSL_PRINTF_ATTRIBUTE(3, 4);
bool DoRawLog(char** buf, int* size, const char* format, ...) {
va_list ap;
va_start(ap, format);
int n = vsnprintf(*buf, *size, format, ap);
va_end(ap);
if (n < 0 || n > *size) return false;
*size -= n;
*buf += n;
return true;
}
void RawLogVA(absl::LogSeverity severity, const char* file, int line,
const char* format, va_list ap) ABSL_PRINTF_ATTRIBUTE(4, 0);
void RawLogVA(absl::LogSeverity severity, const char* file, int line,
const char* format, va_list ap) {
char buffer[kLogBufSize];
char* buf = buffer;
int size = sizeof(buffer);
#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
bool enabled = true;
#else
bool enabled = false;
#endif
#ifdef ABSL_MIN_LOG_LEVEL
if (severity < static_cast<absl::LogSeverity>(ABSL_MIN_LOG_LEVEL) &&
severity < absl::LogSeverity::kFatal) {
enabled = false;
}
#endif
auto log_prefix_hook_ptr = log_prefix_hook.Load();
if (log_prefix_hook_ptr) {
enabled = log_prefix_hook_ptr(severity, file, line, &buf, &size);
} else {
if (enabled) {
DoRawLog(&buf, &size, "[%s : %d] RAW: ", file, line);
}
}
const char* const prefix_end = buf;
#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
if (enabled) {
bool no_chop = VADoRawLog(&buf, &size, format, ap);
if (no_chop) {
DoRawLog(&buf, &size, "\n");
} else {
DoRawLog(&buf, &size, "%s", kTruncated);
}
absl::raw_logging_internal::SafeWriteToStderr(buffer, strlen(buffer));
}
#else
static_cast<void>(format);
static_cast<void>(ap);
#endif
// Abort the process after logging a FATAL message, even if the output itself
// was suppressed.
if (severity == absl::LogSeverity::kFatal) {
abort_hook(file, line, buffer, prefix_end, buffer + kLogBufSize);
abort();
}
}
} // namespace
namespace absl {
inline namespace lts_2019_08_08 {
namespace raw_logging_internal {
void SafeWriteToStderr(const char *s, size_t len) {
#if defined(ABSL_HAVE_SYSCALL_WRITE)
syscall(SYS_write, STDERR_FILENO, s, len);
#elif defined(ABSL_HAVE_POSIX_WRITE)
write(STDERR_FILENO, s, len);
#elif defined(ABSL_HAVE_RAW_IO)
_write(/* stderr */ 2, s, len);
#else
// stderr logging unsupported on this platform
(void) s;
(void) len;
#endif
}
void RawLog(absl::LogSeverity severity, const char* file, int line,
const char* format, ...) ABSL_PRINTF_ATTRIBUTE(4, 5);
void RawLog(absl::LogSeverity severity, const char* file, int line,
const char* format, ...) {
va_list ap;
va_start(ap, format);
RawLogVA(severity, file, line, format, ap);
va_end(ap);
}
// Non-formatting version of RawLog().
//
// TODO(gfalcon): When string_view no longer depends on base, change this
// interface to take its message as a string_view instead.
static void DefaultInternalLog(absl::LogSeverity severity, const char* file,
int line, const std::string& message) {
RawLog(severity, file, line, "%s", message.c_str());
}
bool RawLoggingFullySupported() {
#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
return true;
#else // !ABSL_LOW_LEVEL_WRITE_SUPPORTED
return false;
#endif // !ABSL_LOW_LEVEL_WRITE_SUPPORTED
}
ABSL_CONST_INIT absl::base_internal::AtomicHook<InternalLogFunction>
internal_log_function(DefaultInternalLog);
void RegisterInternalLogFunction(InternalLogFunction func) {
internal_log_function.Store(func);
}
} // namespace raw_logging_internal
} // inline namespace lts_2019_08_08
} // namespace absl

@@ -0,0 +1,183 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Thread-safe logging routines that do not allocate any memory or
// acquire any locks, and can therefore be used by low-level memory
// allocation, synchronization, and signal-handling code.
#ifndef ABSL_BASE_INTERNAL_RAW_LOGGING_H_
#define ABSL_BASE_INTERNAL_RAW_LOGGING_H_
#include <string>
#include "absl/base/attributes.h"
#include "absl/base/internal/atomic_hook.h"
#include "absl/base/log_severity.h"
#include "absl/base/macros.h"
#include "absl/base/port.h"
// This is similar to LOG(severity) << format..., but
// * it is to be used ONLY by low-level modules that can't use normal LOG()
// * it is designed to be a low-level logger that does not allocate any
// memory and does not need any locks, hence:
// * it logs straight and ONLY to STDERR w/o buffering
// * it uses an explicit printf-format and arguments list
// * it will silently chop off really long message strings
// Usage example:
// ABSL_RAW_LOG(ERROR, "Failed foo with %i: %s", status, error);
// This will print an almost standard log line like this to stderr only:
// E0821 211317 file.cc:123] RAW: Failed foo with 22: bad_file
#define ABSL_RAW_LOG(severity, ...) \
do { \
constexpr const char* absl_raw_logging_internal_basename = \
::absl::raw_logging_internal::Basename(__FILE__, \
sizeof(__FILE__) - 1); \
::absl::raw_logging_internal::RawLog(ABSL_RAW_LOGGING_INTERNAL_##severity, \
absl_raw_logging_internal_basename, \
__LINE__, __VA_ARGS__); \
} while (0)
// Similar to CHECK(condition) << message, but for low-level modules:
// we use only ABSL_RAW_LOG that does not allocate memory.
// We do not want to provide args list here to encourage this usage:
// if (!cond) ABSL_RAW_LOG(FATAL, "foo ...", hard_to_compute_args);
// so that the args are not computed when not needed.
#define ABSL_RAW_CHECK(condition, message) \
do { \
if (ABSL_PREDICT_FALSE(!(condition))) { \
ABSL_RAW_LOG(FATAL, "Check %s failed: %s", #condition, message); \
} \
} while (0)
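// Usage example (illustrative):
//   ABSL_RAW_CHECK(fd >= 0, "open() failed");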
// ABSL_INTERNAL_LOG and ABSL_INTERNAL_CHECK work like the RAW variants above,
// except that if the richer log library is linked into the binary, we dispatch
// to that instead. This is potentially useful for internal logging and
// assertions, where we are using RAW_LOG neither for its async-signal-safety
// nor for its non-allocating nature, but rather because raw logging has very
// few other dependencies.
//
// The API is a subset of the above: each macro only takes two arguments. Use
// StrCat if you need to build a richer message.
#define ABSL_INTERNAL_LOG(severity, message) \
do { \
constexpr const char* absl_raw_logging_internal_basename = \
::absl::raw_logging_internal::Basename(__FILE__, \
sizeof(__FILE__) - 1); \
::absl::raw_logging_internal::internal_log_function( \
ABSL_RAW_LOGGING_INTERNAL_##severity, \
absl_raw_logging_internal_basename, __LINE__, message); \
} while (0)
#define ABSL_INTERNAL_CHECK(condition, message) \
do { \
if (ABSL_PREDICT_FALSE(!(condition))) { \
std::string death_message = "Check " #condition " failed: "; \
death_message += std::string(message); \
ABSL_INTERNAL_LOG(FATAL, death_message); \
} \
} while (0)
#define ABSL_RAW_LOGGING_INTERNAL_INFO ::absl::LogSeverity::kInfo
#define ABSL_RAW_LOGGING_INTERNAL_WARNING ::absl::LogSeverity::kWarning
#define ABSL_RAW_LOGGING_INTERNAL_ERROR ::absl::LogSeverity::kError
#define ABSL_RAW_LOGGING_INTERNAL_FATAL ::absl::LogSeverity::kFatal
#define ABSL_RAW_LOGGING_INTERNAL_LEVEL(severity) \
::absl::NormalizeLogSeverity(severity)
namespace absl {
inline namespace lts_2019_08_08 {
namespace raw_logging_internal {
// Helper function to implement ABSL_RAW_LOG
// Logs format... at "severity" level, reporting it
// as called from file:line.
// This does not allocate memory or acquire locks.
void RawLog(absl::LogSeverity severity, const char* file, int line,
const char* format, ...) ABSL_PRINTF_ATTRIBUTE(4, 5);
// Writes the provided buffer directly to stderr, in a safe, low-level manner.
//
// In POSIX this means calling write(), which is async-signal safe and does
// not malloc. If the platform supports the SYS_write syscall, we invoke that
// directly to side-step any libc interception.
void SafeWriteToStderr(const char *s, size_t len);
// compile-time function to get the "base" filename, that is, the part of
// a filename after the last "/" or "\" path separator. The search starts at
// the end of the string; the second parameter is the length of the string.
constexpr const char* Basename(const char* fname, int offset) {
return offset == 0 || fname[offset - 1] == '/' || fname[offset - 1] == '\\'
? fname + offset
: Basename(fname, offset - 1);
}
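// For example, Basename("a/b/c.cc", 8) returns "c.cc".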
// For testing only.
// Returns true if raw logging is fully supported. When it is not
// fully supported, no messages will be emitted, but a log at FATAL
// severity will cause an abort.
//
// TODO(gfalcon): Come up with a better name for this method.
bool RawLoggingFullySupported();
// Function type for a raw_logging customization hook for suppressing messages
// by severity, and for writing custom prefixes on non-suppressed messages.
//
// The installed hook is called for every raw log invocation. The message will
// be logged to stderr only if the hook returns true. FATAL errors will cause
// the process to abort, even if writing to stderr is suppressed. The hook is
// also provided with an output buffer, where it can write a custom log message
// prefix.
//
// The raw_logging system does not allocate memory or grab locks. User-provided
// hooks must avoid these operations, and must not throw exceptions.
//
// 'severity' is the severity level of the message being written.
// 'file' and 'line' are the file and line number where the ABSL_RAW_LOG macro
// was located.
// 'buffer' and 'buf_size' are pointers to the buffer and buffer size. If the
// hook writes a prefix, it must increment *buffer and decrement *buf_size
// accordingly.
using LogPrefixHook = bool (*)(absl::LogSeverity severity, const char* file,
int line, char** buffer, int* buf_size);
// Function type for a raw_logging customization hook called to abort a process
// when a FATAL message is logged. If the provided AbortHook() returns, the
// logging system will call abort().
//
// 'file' and 'line' are the file and line number where the ABSL_RAW_LOG macro
// was located.
// The null-terminated logged message lives in the buffer between 'buf_start'
// and 'buf_end'. 'prefix_end' points to the first non-prefix character of the
// buffer (as written by the LogPrefixHook).
using AbortHook = void (*)(const char* file, int line, const char* buf_start,
const char* prefix_end, const char* buf_end);
// Internal logging function for ABSL_INTERNAL_LOG to dispatch to.
//
// TODO(gfalcon): When string_view no longer depends on base, change this
// interface to take its message as a string_view instead.
using InternalLogFunction = void (*)(absl::LogSeverity severity,
const char* file, int line,
const std::string& message);
extern base_internal::AtomicHook<InternalLogFunction> internal_log_function;
void RegisterInternalLogFunction(InternalLogFunction func);
} // namespace raw_logging_internal
} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_BASE_INTERNAL_RAW_LOGGING_H_

@@ -0,0 +1,56 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Core interfaces and definitions used by low-level interfaces such as
// SpinLock.
#ifndef ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_
#define ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_
namespace absl {
inline namespace lts_2019_08_08 {
namespace base_internal {
// Used to describe how a thread may be scheduled. Typically associated with
// the declaration of a resource supporting synchronized access.
//
// SCHEDULE_COOPERATIVE_AND_KERNEL:
// Specifies that when waiting, a cooperative thread (e.g. a Fiber) may
// reschedule (using base::scheduling semantics); allowing other cooperative
// threads to proceed.
//
// SCHEDULE_KERNEL_ONLY: (Also described as "non-cooperative")
// Specifies that no cooperative scheduling semantics may be used, even if the
// current thread is itself cooperatively scheduled. This means that
// cooperative threads will NOT allow other cooperative threads to execute in
// their place while waiting for a resource of this type. Host operating system
// semantics (e.g. a futex) may still be used.
//
// When optional, clients should strongly prefer SCHEDULE_COOPERATIVE_AND_KERNEL
// by default. SCHEDULE_KERNEL_ONLY should only be used for resources on which
// base::scheduling (e.g. the implementation of a Scheduler) may depend.
//
// NOTE: Cooperative resources may not be nested below non-cooperative ones.
// This means that it is invalid to acquire a SCHEDULE_COOPERATIVE_AND_KERNEL
// resource if a SCHEDULE_KERNEL_ONLY resource is already held.
enum SchedulingMode {
SCHEDULE_KERNEL_ONLY = 0, // Allow scheduling only the host OS.
SCHEDULE_COOPERATIVE_AND_KERNEL, // Also allow cooperative scheduling.
};
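// For example (as seen in LowLevelAlloc), a resource that the scheduler
// itself may depend on constructs its lock non-cooperatively:
//
//   base_internal::SpinLock mu(base_internal::SCHEDULE_KERNEL_ONLY);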
} // namespace base_internal
} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_

@@ -0,0 +1,81 @@
// Copyright 2019 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/base/internal/scoped_set_env.h"
#ifdef _WIN32
#include <windows.h>
#endif
#include <cstdlib>
#include "absl/base/internal/raw_logging.h"
namespace absl {
inline namespace lts_2019_08_08 {
namespace base_internal {
namespace {
#ifdef _WIN32
const int kMaxEnvVarValueSize = 1024;
#endif
void SetEnvVar(const char* name, const char* value) {
#ifdef _WIN32
SetEnvironmentVariableA(name, value);
#else
if (value == nullptr) {
::unsetenv(name);
} else {
::setenv(name, value, 1);
}
#endif
}
} // namespace
ScopedSetEnv::ScopedSetEnv(const char* var_name, const char* new_value)
: var_name_(var_name), was_unset_(false) {
#ifdef _WIN32
char buf[kMaxEnvVarValueSize];
auto get_res = GetEnvironmentVariableA(var_name_.c_str(), buf, sizeof(buf));
ABSL_INTERNAL_CHECK(get_res < sizeof(buf), "value exceeds buffer size");
if (get_res == 0) {
was_unset_ = (GetLastError() == ERROR_ENVVAR_NOT_FOUND);
} else {
old_value_.assign(buf, get_res);
}
SetEnvironmentVariableA(var_name_.c_str(), new_value);
#else
const char* val = ::getenv(var_name_.c_str());
if (val == nullptr) {
was_unset_ = true;
} else {
old_value_ = val;
}
#endif
SetEnvVar(var_name_.c_str(), new_value);
}
ScopedSetEnv::~ScopedSetEnv() {
SetEnvVar(var_name_.c_str(), was_unset_ ? nullptr : old_value_.c_str());
}
} // namespace base_internal
} // inline namespace lts_2019_08_08
} // namespace absl

@@ -0,0 +1,43 @@
//
// Copyright 2019 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef ABSL_BASE_INTERNAL_SCOPED_SET_ENV_H_
#define ABSL_BASE_INTERNAL_SCOPED_SET_ENV_H_
#include <string>
namespace absl {
inline namespace lts_2019_08_08 {
namespace base_internal {
class ScopedSetEnv {
public:
ScopedSetEnv(const char* var_name, const char* new_value);
~ScopedSetEnv();
private:
std::string var_name_;
std::string old_value_;
// True if the environment variable was initially not set.
bool was_unset_;
};
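// Usage example (illustrative):
//
//   {
//     ScopedSetEnv env("MY_VAR", "new-value");  // MY_VAR == "new-value" here
//   }  // previous value of MY_VAR (or its unset state) is restored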
} // namespace base_internal
} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_BASE_INTERNAL_SCOPED_SET_ENV_H_

@@ -0,0 +1,99 @@
// Copyright 2019 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifdef _WIN32
#include <windows.h>
#endif
#include "gtest/gtest.h"
#include "absl/base/internal/scoped_set_env.h"
namespace {
using absl::base_internal::ScopedSetEnv;
std::string GetEnvVar(const char* name) {
#ifdef _WIN32
char buf[1024];
auto get_res = GetEnvironmentVariableA(name, buf, sizeof(buf));
if (get_res >= sizeof(buf)) {
return "TOO_BIG";
}
if (get_res == 0) {
return "UNSET";
}
return std::string(buf, get_res);
#else
const char* val = ::getenv(name);
if (val == nullptr) {
return "UNSET";
}
return val;
#endif
}
TEST(ScopedSetEnvTest, SetNonExistingVarToString) {
EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET");
{
ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", "value");
EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "value");
}
EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET");
}
TEST(ScopedSetEnvTest, SetNonExistingVarToNull) {
EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET");
{
ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", nullptr);
EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET");
}
EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET");
}
TEST(ScopedSetEnvTest, SetExistingVarToString) {
ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", "value");
EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "value");
{
ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", "new_value");
EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "new_value");
}
EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "value");
}
TEST(ScopedSetEnvTest, SetExistingVarToNull) {
ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", "value");
EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "value");
{
ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", nullptr);
EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET");
}
EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "value");
}
} // namespace

@@ -0,0 +1,233 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/base/internal/spinlock.h"
#include <algorithm>
#include <atomic>
#include <limits>
#include "absl/base/attributes.h"
#include "absl/base/internal/atomic_hook.h"
#include "absl/base/internal/cycleclock.h"
#include "absl/base/internal/spinlock_wait.h"
#include "absl/base/internal/sysinfo.h" /* For NumCPUs() */
#include "absl/base/call_once.h"
// Description of lock-word:
// 31..00: [............................3][2][1][0]
//
// [0]: kSpinLockHeld
// [1]: kSpinLockCooperative
// [2]: kSpinLockDisabledScheduling
// [31..3]: ONLY kSpinLockSleeper OR
// Wait time in cycles >> PROFILE_TIMESTAMP_SHIFT
//
// Detailed descriptions:
//
// Bit [0]: The lock is considered held iff kSpinLockHeld is set.
//
// Bit [1]: Eligible waiters (e.g. Fibers) may co-operatively reschedule when
// contended iff kSpinLockCooperative is set.
//
// Bit [2]: This bit is exclusive from bit [1]. It is used only by a
// non-cooperative lock. When set, indicates that scheduling was
// successfully disabled when the lock was acquired. May be unset,
// even if non-cooperative, if a ThreadIdentity did not yet exist at
// time of acquisition.
//
// Bit [3]: If this is the only upper bit ([31..3]) set then this lock was
//          acquired without contention; however, at least one waiter exists.
//
// Otherwise, bits [31..3] represent the time spent by the current lock
// holder to acquire the lock. There may be outstanding waiter(s).
namespace absl {
inline namespace lts_2019_08_08 {
namespace base_internal {
ABSL_CONST_INIT static base_internal::AtomicHook<void (*)(const void *lock,
int64_t wait_cycles)>
submit_profile_data;
void RegisterSpinLockProfiler(void (*fn)(const void *contendedlock,
int64_t wait_cycles)) {
submit_profile_data.Store(fn);
}
// Uncommon constructors.
SpinLock::SpinLock(base_internal::SchedulingMode mode)
: lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {
ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
}
SpinLock::SpinLock(base_internal::LinkerInitialized,
base_internal::SchedulingMode mode) {
ABSL_TSAN_MUTEX_CREATE(this, 0);
if (IsCooperative(mode)) {
InitLinkerInitializedAndCooperative();
}
// Otherwise, lockword_ is already initialized.
}
// Static (linker initialized) spinlocks always start life as functional
// non-cooperative locks. When their static constructor does run, it will call
// this initializer to augment the lockword with the cooperative bit. By
// actually taking the lock when we do this we avoid the need for an atomic
// operation in the regular unlock path.
//
// SlowLock() must be careful to re-test for this bit so that any outstanding
// waiters may be upgraded to cooperative status.
void SpinLock::InitLinkerInitializedAndCooperative() {
Lock();
lockword_.fetch_or(kSpinLockCooperative, std::memory_order_relaxed);
Unlock();
}
// Monitor the lock to see if its value changes within some time period
// (adaptive_spin_count loop iterations). The last value read from the lock
// is returned from the method.
uint32_t SpinLock::SpinLoop() {
// We are already in the slow path of SpinLock, initialize the
// adaptive_spin_count here.
ABSL_CONST_INIT static absl::once_flag init_adaptive_spin_count;
ABSL_CONST_INIT static int adaptive_spin_count = 0;
base_internal::LowLevelCallOnce(&init_adaptive_spin_count, []() {
adaptive_spin_count = base_internal::NumCPUs() > 1 ? 1000 : 1;
});
int c = adaptive_spin_count;
uint32_t lock_value;
do {
lock_value = lockword_.load(std::memory_order_relaxed);
} while ((lock_value & kSpinLockHeld) != 0 && --c > 0);
return lock_value;
}
void SpinLock::SlowLock() {
uint32_t lock_value = SpinLoop();
lock_value = TryLockInternal(lock_value, 0);
if ((lock_value & kSpinLockHeld) == 0) {
return;
}
// The lock was not obtained initially, so this thread needs to wait for
// it. Record the current timestamp in the local variable wait_start_time
// so the total wait time can be stored in the lockword once this thread
// obtains the lock.
int64_t wait_start_time = CycleClock::Now();
uint32_t wait_cycles = 0;
int lock_wait_call_count = 0;
while ((lock_value & kSpinLockHeld) != 0) {
// If the lock is currently held, but not marked as having a sleeper, mark
// it as having a sleeper.
if ((lock_value & kWaitTimeMask) == 0) {
// Here, just "mark" that the thread is going to sleep. Don't store the
// lock wait time in the lock as that will cause the current lock
// owner to think it experienced contention.
if (lockword_.compare_exchange_strong(
lock_value, lock_value | kSpinLockSleeper,
std::memory_order_relaxed, std::memory_order_relaxed)) {
// Successfully transitioned to kSpinLockSleeper. Pass
// kSpinLockSleeper to the SpinLockWait routine to properly indicate
// the last lock_value observed.
lock_value |= kSpinLockSleeper;
} else if ((lock_value & kSpinLockHeld) == 0) {
// Lock is free again, so try and acquire it before sleeping. The
// new lock state will be the number of cycles this thread waited if
// this thread obtains the lock.
lock_value = TryLockInternal(lock_value, wait_cycles);
continue; // Skip the delay at the end of the loop.
}
}
base_internal::SchedulingMode scheduling_mode;
if ((lock_value & kSpinLockCooperative) != 0) {
scheduling_mode = base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL;
} else {
scheduling_mode = base_internal::SCHEDULE_KERNEL_ONLY;
}
    // SpinLockDelay() calls into the fiber scheduler; we need to see
    // synchronization there to avoid false positives.
ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
// Wait for an OS specific delay.
base_internal::SpinLockDelay(&lockword_, lock_value, ++lock_wait_call_count,
scheduling_mode);
ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
// Spin again after returning from the wait routine to give this thread
// some chance of obtaining the lock.
lock_value = SpinLoop();
wait_cycles = EncodeWaitCycles(wait_start_time, CycleClock::Now());
lock_value = TryLockInternal(lock_value, wait_cycles);
}
}
void SpinLock::SlowUnlock(uint32_t lock_value) {
base_internal::SpinLockWake(&lockword_,
false); // wake waiter if necessary
// If our acquisition was contended, collect contentionz profile info. We
// reserve a unitary wait time to represent that a waiter exists without our
// own acquisition having been contended.
if ((lock_value & kWaitTimeMask) != kSpinLockSleeper) {
const uint64_t wait_cycles = DecodeWaitCycles(lock_value);
ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
submit_profile_data(this, wait_cycles);
ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
}
}
// We use the upper 29 bits of the lock word to store the time spent waiting to
// acquire this lock. This is reported by contentionz profiling. Since the
// lower bits of the cycle counter wrap very quickly on high-frequency
// processors we divide to reduce the granularity to 2^PROFILE_TIMESTAMP_SHIFT
// sized units. On a 4 GHz machine this will lose track of wait times greater
// than (2^29 / 4 GHz) * 128 =~ 17.2 seconds. Such waits should be extremely
// rare.
enum { PROFILE_TIMESTAMP_SHIFT = 7 };
enum { LOCKWORD_RESERVED_SHIFT = 3 }; // We currently reserve the lower 3 bits.
uint32_t SpinLock::EncodeWaitCycles(int64_t wait_start_time,
int64_t wait_end_time) {
static const int64_t kMaxWaitTime =
std::numeric_limits<uint32_t>::max() >> LOCKWORD_RESERVED_SHIFT;
int64_t scaled_wait_time =
(wait_end_time - wait_start_time) >> PROFILE_TIMESTAMP_SHIFT;
// Return a representation of the time spent waiting that can be stored in
// the lock word's upper bits.
uint32_t clamped = static_cast<uint32_t>(
std::min(scaled_wait_time, kMaxWaitTime) << LOCKWORD_RESERVED_SHIFT);
if (clamped == 0) {
return kSpinLockSleeper; // Just wake waiters, but don't record contention.
}
// Bump up value if necessary to avoid returning kSpinLockSleeper.
const uint32_t kMinWaitTime =
kSpinLockSleeper + (1 << LOCKWORD_RESERVED_SHIFT);
if (clamped == kSpinLockSleeper) {
return kMinWaitTime;
}
return clamped;
}
uint64_t SpinLock::DecodeWaitCycles(uint32_t lock_value) {
// Cast to uint32_t first to ensure bits [63:32] are cleared.
const uint64_t scaled_wait_time =
static_cast<uint32_t>(lock_value & kWaitTimeMask);
return scaled_wait_time
<< (PROFILE_TIMESTAMP_SHIFT - LOCKWORD_RESERVED_SHIFT);
}
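// Worked example (illustrative numbers, not from the source): a wait of
// 1,000,000 cycles is scaled to 1000000 >> PROFILE_TIMESTAMP_SHIFT = 7812
// units and stored as 7812 << LOCKWORD_RESERVED_SHIFT = 62496 in the lock
// word's upper bits. DecodeWaitCycles() then shifts by the remaining
// PROFILE_TIMESTAMP_SHIFT - LOCKWORD_RESERVED_SHIFT = 4 bits, returning
// 62496 << 4 = 999936: the original wait rounded down to a multiple of
// 2^7 = 128 cycles.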
} // namespace base_internal
} // inline namespace lts_2019_08_08
} // namespace absl

View File

@@ -0,0 +1,244 @@
//
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Most users requiring mutual exclusion should use Mutex.
// SpinLock is provided for use in three situations:
// - for use in code that Mutex itself depends on
// - to get a faster fast-path release under low contention (without an
//    atomic read-modify-write). In return, SpinLock has worse behaviour under
// contention, which is why Mutex is preferred in most situations.
// - for async signal safety (see below)
// SpinLock is async signal safe. If a spinlock is used within a signal
// handler, all code that acquires the lock must ensure that the signal cannot
// arrive while they are holding the lock. Typically, this is done by blocking
// the signal.
#ifndef ABSL_BASE_INTERNAL_SPINLOCK_H_
#define ABSL_BASE_INTERNAL_SPINLOCK_H_
#include <stdint.h>
#include <sys/types.h>
#include <atomic>
#include "absl/base/attributes.h"
#include "absl/base/dynamic_annotations.h"
#include "absl/base/internal/low_level_scheduling.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/scheduling_mode.h"
#include "absl/base/internal/tsan_mutex_interface.h"
#include "absl/base/macros.h"
#include "absl/base/port.h"
#include "absl/base/thread_annotations.h"
namespace absl {
inline namespace lts_2019_08_08 {
namespace base_internal {
class LOCKABLE SpinLock {
public:
SpinLock() : lockword_(kSpinLockCooperative) {
ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
}
// Special constructor for use with static SpinLock objects. E.g.,
//
// static SpinLock lock(base_internal::kLinkerInitialized);
//
// When initialized using this constructor, we depend on the fact
// that the linker has already initialized the memory appropriately. The lock
// is initialized in non-cooperative mode.
//
// A SpinLock constructed like this can be freely used from global
// initializers without worrying about the order in which global
// initializers run.
explicit SpinLock(base_internal::LinkerInitialized) {
// Does nothing; lockword_ is already initialized
ABSL_TSAN_MUTEX_CREATE(this, 0);
}
// Constructors that allow non-cooperative spinlocks to be created for use
// inside thread schedulers. Normal clients should not use these.
explicit SpinLock(base_internal::SchedulingMode mode);
SpinLock(base_internal::LinkerInitialized,
base_internal::SchedulingMode mode);
~SpinLock() { ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static); }
// Acquire this SpinLock.
inline void Lock() EXCLUSIVE_LOCK_FUNCTION() {
ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
if (!TryLockImpl()) {
SlowLock();
}
ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
}
// Try to acquire this SpinLock without blocking and return true if the
// acquisition was successful. If the lock was not acquired, false is
// returned. If this SpinLock is free at the time of the call, TryLock
// will return true with high probability.
inline bool TryLock() EXCLUSIVE_TRYLOCK_FUNCTION(true) {
ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
bool res = TryLockImpl();
ABSL_TSAN_MUTEX_POST_LOCK(
this, __tsan_mutex_try_lock | (res ? 0 : __tsan_mutex_try_lock_failed),
0);
return res;
}
// Release this SpinLock, which must be held by the calling thread.
inline void Unlock() UNLOCK_FUNCTION() {
ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
lock_value = lockword_.exchange(lock_value & kSpinLockCooperative,
std::memory_order_release);
if ((lock_value & kSpinLockDisabledScheduling) != 0) {
base_internal::SchedulingGuard::EnableRescheduling(true);
}
if ((lock_value & kWaitTimeMask) != 0) {
// Collect contentionz profile info, and speed the wakeup of any waiter.
// The wait_cycles value indicates how long this thread spent waiting
// for the lock.
SlowUnlock(lock_value);
}
ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
}
// Determine if the lock is held. When the lock is held by the invoking
// thread, true will always be returned. Intended to be used as
// CHECK(lock.IsHeld()).
inline bool IsHeld() const {
return (lockword_.load(std::memory_order_relaxed) & kSpinLockHeld) != 0;
}
protected:
// These should not be exported except for testing.
// Store number of cycles between wait_start_time and wait_end_time in a
// lock value.
static uint32_t EncodeWaitCycles(int64_t wait_start_time,
int64_t wait_end_time);
// Extract number of wait cycles in a lock value.
static uint64_t DecodeWaitCycles(uint32_t lock_value);
// Provide access to protected method above. Use for testing only.
friend struct SpinLockTest;
private:
// lockword_ is used to store the following:
//
// bit[0] encodes whether a lock is being held.
// bit[1] encodes whether a lock uses cooperative scheduling.
// bit[2] encodes whether a lock disables scheduling.
// bit[3:31] encodes time a lock spent on waiting as a 29-bit unsigned int.
enum { kSpinLockHeld = 1 };
enum { kSpinLockCooperative = 2 };
enum { kSpinLockDisabledScheduling = 4 };
enum { kSpinLockSleeper = 8 };
enum { kWaitTimeMask = // Includes kSpinLockSleeper.
~(kSpinLockHeld | kSpinLockCooperative | kSpinLockDisabledScheduling) };
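  // Worked example (illustrative): a lock word of 0x0000000B has
  // kSpinLockHeld (bit 0), kSpinLockCooperative (bit 1), and kSpinLockSleeper
  // (bit 3) set: the lock is held in cooperative mode and at least one waiter
  // has gone to sleep.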
// Returns true if the provided scheduling mode is cooperative.
static constexpr bool IsCooperative(
base_internal::SchedulingMode scheduling_mode) {
return scheduling_mode == base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL;
}
uint32_t TryLockInternal(uint32_t lock_value, uint32_t wait_cycles);
void InitLinkerInitializedAndCooperative();
void SlowLock() ABSL_ATTRIBUTE_COLD;
void SlowUnlock(uint32_t lock_value) ABSL_ATTRIBUTE_COLD;
uint32_t SpinLoop();
inline bool TryLockImpl() {
uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
return (TryLockInternal(lock_value, 0) & kSpinLockHeld) == 0;
}
std::atomic<uint32_t> lockword_;
SpinLock(const SpinLock&) = delete;
SpinLock& operator=(const SpinLock&) = delete;
};
// Corresponding locker object that arranges to acquire a spinlock for
// the duration of a C++ scope.
class SCOPED_LOCKABLE SpinLockHolder {
public:
inline explicit SpinLockHolder(SpinLock* l) EXCLUSIVE_LOCK_FUNCTION(l)
: lock_(l) {
l->Lock();
}
inline ~SpinLockHolder() UNLOCK_FUNCTION() { lock_->Unlock(); }
SpinLockHolder(const SpinLockHolder&) = delete;
SpinLockHolder& operator=(const SpinLockHolder&) = delete;
private:
SpinLock* lock_;
};
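// Example usage (an illustrative sketch; `g_counter_lock` and `g_counter`
// are hypothetical names, not part of this header):
//
//   static absl::base_internal::SpinLock g_counter_lock(
//       absl::base_internal::kLinkerInitialized);
//   static int64_t g_counter GUARDED_BY(g_counter_lock);
//
//   int64_t IncrementCounter() {
//     absl::base_internal::SpinLockHolder l(&g_counter_lock);
//     return ++g_counter;
//   }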
// Register a hook for profiling support.
//
// The function pointer registered here will be called whenever a spinlock is
// contended. The callback is given an opaque handle to the contended spinlock
// and the number of wait cycles. This is thread-safe, but only a single
// profiler can be registered. It is an error to call this function multiple
// times with different arguments.
void RegisterSpinLockProfiler(void (*fn)(const void* lock,
int64_t wait_cycles));
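// Example (an illustrative sketch; `MyProfiler` is a hypothetical callback):
//
//   void MyProfiler(const void* lock, int64_t wait_cycles) {
//     // Forward (lock, wait_cycles) to a profiling subsystem.
//   }
//   ...
//   absl::base_internal::RegisterSpinLockProfiler(&MyProfiler);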
//------------------------------------------------------------------------------
// Public interface ends here.
//------------------------------------------------------------------------------
// If (result & kSpinLockHeld) == 0, then *this was successfully locked.
// Otherwise, returns last observed value for lockword_.
inline uint32_t SpinLock::TryLockInternal(uint32_t lock_value,
uint32_t wait_cycles) {
if ((lock_value & kSpinLockHeld) != 0) {
return lock_value;
}
uint32_t sched_disabled_bit = 0;
if ((lock_value & kSpinLockCooperative) == 0) {
// For non-cooperative locks we must make sure we mark ourselves as
// non-reschedulable before we attempt to CompareAndSwap.
if (base_internal::SchedulingGuard::DisableRescheduling()) {
sched_disabled_bit = kSpinLockDisabledScheduling;
}
}
if (lockword_.compare_exchange_strong(
lock_value,
kSpinLockHeld | lock_value | wait_cycles | sched_disabled_bit,
std::memory_order_acquire, std::memory_order_relaxed)) {
} else {
base_internal::SchedulingGuard::EnableRescheduling(sched_disabled_bit != 0);
}
return lock_value;
}
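// Worked example (illustrative): if lockword_ was observed free (0) and the
// CAS succeeds, TryLockInternal() returns the pre-CAS value 0, so
// (result & kSpinLockHeld) == 0 signals a successful acquisition. If another
// thread holds the lock, the returned value has kSpinLockHeld set and the
// caller knows the attempt failed.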
} // namespace base_internal
} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_BASE_INTERNAL_SPINLOCK_H_

View File

@@ -0,0 +1,35 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// This file is an Akaros-specific part of spinlock_wait.cc
#include <atomic>
#include "absl/base/internal/scheduling_mode.h"
extern "C" {
ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay(
std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */,
int /* loop */, absl::base_internal::SchedulingMode /* mode */) {
// In Akaros, one must take care not to call anything that could cause a
// malloc(), a blocking system call, or a uthread_yield() while holding a
  // spinlock. Our callers assume we will not call into libraries or other
// arbitrary code.
}
ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockWake(
std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}
} // extern "C"

View File

@@ -0,0 +1,52 @@
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// See also //absl/synchronization:mutex_benchmark for a comparison of SpinLock
// and Mutex performance under varying levels of contention.
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/scheduling_mode.h"
#include "absl/base/internal/spinlock.h"
#include "absl/synchronization/internal/create_thread_identity.h"
#include "benchmark/benchmark.h"
namespace {
template <absl::base_internal::SchedulingMode scheduling_mode>
static void BM_SpinLock(benchmark::State& state) {
// Ensure a ThreadIdentity is installed.
ABSL_INTERNAL_CHECK(
absl::synchronization_internal::GetOrCreateCurrentThreadIdentity() !=
nullptr,
"GetOrCreateCurrentThreadIdentity() failed");
static auto* spinlock = new absl::base_internal::SpinLock(scheduling_mode);
for (auto _ : state) {
absl::base_internal::SpinLockHolder holder(spinlock);
}
}
BENCHMARK_TEMPLATE(BM_SpinLock,
absl::base_internal::SCHEDULE_KERNEL_ONLY)
->UseRealTime()
->Threads(1)
->ThreadPerCpu();
BENCHMARK_TEMPLATE(BM_SpinLock,
absl::base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL)
->UseRealTime()
->Threads(1)
->ThreadPerCpu();
} // namespace

View File

@@ -0,0 +1,67 @@
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// This file is a Linux-specific part of spinlock_wait.cc
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <atomic>
#include <cerrno>
#include <climits>
#include <cstdint>
#include <ctime>
#include "absl/base/attributes.h"
// The SpinLock lockword is `std::atomic<uint32_t>`. Here we assert that
// `std::atomic<uint32_t>` is bitwise equivalent to the `int` expected
// by SYS_futex. We also assume that reads/writes done to the lockword
// by SYS_futex have rational semantics with regard to the
// std::atomic<> API. C++ provides no guarantees of these assumptions,
// but they are believed to hold in practice.
static_assert(sizeof(std::atomic<uint32_t>) == sizeof(int),
"SpinLock lockword has the wrong size for a futex");
// Some Android headers are missing these definitions even though they
// support these futex operations.
#ifdef __BIONIC__
#ifndef SYS_futex
#define SYS_futex __NR_futex
#endif
#ifndef FUTEX_PRIVATE_FLAG
#define FUTEX_PRIVATE_FLAG 128
#endif
#endif
extern "C" {
ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay(
std::atomic<uint32_t> *w, uint32_t value, int loop,
absl::base_internal::SchedulingMode) {
int save_errno = errno;
struct timespec tm;
tm.tv_sec = 0;
tm.tv_nsec = absl::base_internal::SpinLockSuggestedDelayNS(loop);
syscall(SYS_futex, w, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, value, &tm);
errno = save_errno;
}
ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockWake(std::atomic<uint32_t> *w,
bool all) {
syscall(SYS_futex, w, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, all ? INT_MAX : 1, 0);
}
} // extern "C"

View File

@@ -0,0 +1,46 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// This file is a Posix-specific part of spinlock_wait.cc
#include <sched.h>
#include <atomic>
#include <ctime>
#include <cerrno>
#include "absl/base/internal/scheduling_mode.h"
#include "absl/base/port.h"
extern "C" {
ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay(
std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */, int loop,
absl::base_internal::SchedulingMode /* mode */) {
int save_errno = errno;
  if (loop == 0) {
    // First wait: return immediately and let the caller spin again.
  } else if (loop == 1) {
    // Second wait: yield the processor to another runnable thread.
    sched_yield();
} else {
struct timespec tm;
tm.tv_sec = 0;
tm.tv_nsec = absl::base_internal::SpinLockSuggestedDelayNS(loop);
nanosleep(&tm, nullptr);
}
errno = save_errno;
}
ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockWake(
std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}
} // extern "C"

View File

@@ -0,0 +1,81 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// The OS-specific header included below must provide two calls:
// AbslInternalSpinLockDelay() and AbslInternalSpinLockWake().
// See spinlock_wait.h for the specs.
#include <atomic>
#include <cstdint>
#include "absl/base/internal/spinlock_wait.h"
#if defined(_WIN32)
#include "absl/base/internal/spinlock_win32.inc"
#elif defined(__linux__)
#include "absl/base/internal/spinlock_linux.inc"
#elif defined(__akaros__)
#include "absl/base/internal/spinlock_akaros.inc"
#else
#include "absl/base/internal/spinlock_posix.inc"
#endif
namespace absl {
inline namespace lts_2019_08_08 {
namespace base_internal {
// See spinlock_wait.h for spec.
uint32_t SpinLockWait(std::atomic<uint32_t> *w, int n,
const SpinLockWaitTransition trans[],
base_internal::SchedulingMode scheduling_mode) {
int loop = 0;
for (;;) {
uint32_t v = w->load(std::memory_order_acquire);
int i;
for (i = 0; i != n && v != trans[i].from; i++) {
}
if (i == n) {
SpinLockDelay(w, v, ++loop, scheduling_mode); // no matching transition
} else if (trans[i].to == v || // null transition
w->compare_exchange_strong(v, trans[i].to,
std::memory_order_acquire,
std::memory_order_relaxed)) {
if (trans[i].done) return v;
}
}
}
static std::atomic<uint64_t> delay_rand;
// Return a suggested delay in nanoseconds for iteration number "loop"
int SpinLockSuggestedDelayNS(int loop) {
// Weak pseudo-random number generator to get some spread between threads
// when many are spinning.
uint64_t r = delay_rand.load(std::memory_order_relaxed);
r = 0x5deece66dLL * r + 0xb; // numbers from nrand48()
delay_rand.store(r, std::memory_order_relaxed);
if (loop < 0 || loop > 32) { // limit loop to 0..32
loop = 32;
}
const int kMinDelay = 128 << 10; // 128us
// Double delay every 8 iterations, up to 16x (2ms).
int delay = kMinDelay << (loop / 8);
// Randomize in delay..2*delay range, for resulting 128us..4ms range.
return delay | ((delay - 1) & static_cast<int>(r));
}
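// Worked example (illustrative numbers): kMinDelay is 128 << 10 = 131072ns
// (~128us). The delay doubles every 8 iterations, so loop == 16 suggests
// 131072 << 2 = 524288ns (~512us), and loop >= 32 caps at
// 131072 << 4 = 2097152ns (~2ms); OR-ing in the low random bits then spreads
// each result over the delay..2*delay range.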
} // namespace base_internal
} // inline namespace lts_2019_08_08
} // namespace absl

View File

@@ -0,0 +1,93 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_
#define ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_
// Operations to make atomic transitions on a word, and to allow
// waiting for those transitions to become possible.
#include <stdint.h>
#include <atomic>
#include "absl/base/internal/scheduling_mode.h"
namespace absl {
inline namespace lts_2019_08_08 {
namespace base_internal {
// SpinLockWait() waits until it can perform one of several transitions from
// "from" to "to". It returns when it performs a transition where done==true.
struct SpinLockWaitTransition {
uint32_t from;
uint32_t to;
bool done;
};
// Wait until *w can transition from trans[i].from to trans[i].to for some i
// satisfying 0<=i<n && trans[i].done, atomically make the transition,
// then return the old value of *w. Make any other atomic transitions
// where !trans[i].done, but continue waiting.
uint32_t SpinLockWait(std::atomic<uint32_t> *w, int n,
const SpinLockWaitTransition trans[],
SchedulingMode scheduling_mode);
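// Example (an illustrative sketch; kIdle, kBusy, and the transition table
// are hypothetical): claim *w once it becomes idle.
//
//   static constexpr uint32_t kIdle = 0, kBusy = 1;
//   const SpinLockWaitTransition trans[] = {{kIdle, kBusy, true}};
//   uint32_t old = SpinLockWait(w, 1, trans, SCHEDULE_KERNEL_ONLY);
//   // Returns once *w atomically moved kIdle -> kBusy; old is kIdle.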
// If possible, wake some thread that has called SpinLockDelay(w, ...). If
// "all" is true, wake all such threads. This call is a hint, and on some
// systems it may be a no-op; threads calling SpinLockDelay() will always wake
// eventually even if SpinLockWake() is never called.
void SpinLockWake(std::atomic<uint32_t> *w, bool all);
// Wait for an appropriate spin delay on iteration "loop" of a
// spin loop on location *w, whose previously observed value was "value".
// SpinLockDelay() may do nothing, may yield the CPU, may sleep a clock tick,
// or may wait for a delay that can be truncated by a call to SpinLockWake(w).
// In all cases, it must return in bounded time even if SpinLockWake() is not
// called.
void SpinLockDelay(std::atomic<uint32_t> *w, uint32_t value, int loop,
base_internal::SchedulingMode scheduling_mode);
// Helper used by AbslInternalSpinLockDelay.
// Returns a suggested delay in nanoseconds for iteration number "loop".
int SpinLockSuggestedDelayNS(int loop);
} // namespace base_internal
} // inline namespace lts_2019_08_08
} // namespace absl
// In some build configurations we pass --detect-odr-violations to the
// gold linker. This causes it to flag weak symbol overrides as ODR
// violations. Because ODR only applies to C++ and not C,
// --detect-odr-violations ignores symbols not mangled with C++ names.
// By changing our extension points to be extern "C", we dodge this
// check.
extern "C" {
void AbslInternalSpinLockWake(std::atomic<uint32_t> *w, bool all);
void AbslInternalSpinLockDelay(
std::atomic<uint32_t> *w, uint32_t value, int loop,
absl::base_internal::SchedulingMode scheduling_mode);
}
inline void absl::base_internal::SpinLockWake(std::atomic<uint32_t> *w,
bool all) {
AbslInternalSpinLockWake(w, all);
}
inline void absl::base_internal::SpinLockDelay(
std::atomic<uint32_t> *w, uint32_t value, int loop,
absl::base_internal::SchedulingMode scheduling_mode) {
AbslInternalSpinLockDelay(w, value, loop, scheduling_mode);
}
#endif // ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_

View File

@@ -0,0 +1,37 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// This file is a Win32-specific part of spinlock_wait.cc
#include <windows.h>
#include <atomic>
#include "absl/base/internal/scheduling_mode.h"
extern "C" {
void AbslInternalSpinLockDelay(std::atomic<uint32_t>* /* lock_word */,
uint32_t /* value */, int loop,
absl::base_internal::SchedulingMode /* mode */) {
  if (loop == 0) {
    // First wait: return immediately and let the caller spin again.
  } else if (loop == 1) {
    // Second wait: yield the remainder of this thread's timeslice.
    Sleep(0);
  } else {
    // SpinLockSuggestedDelayNS() returns nanoseconds; Sleep() takes
    // milliseconds, so divide by 1,000,000.
    Sleep(absl::base_internal::SpinLockSuggestedDelayNS(loop) / 1000000);
}
}
void AbslInternalSpinLockWake(std::atomic<uint32_t>* /* lock_word */,
bool /* all */) {}
} // extern "C"

View File

@@ -0,0 +1,406 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/base/internal/sysinfo.h"
#include "absl/base/attributes.h"
#ifdef _WIN32
#include <shlwapi.h>
#include <windows.h>
#else
#include <fcntl.h>
#include <pthread.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#endif
#ifdef __linux__
#include <sys/syscall.h>
#endif
#if defined(__APPLE__) || defined(__FreeBSD__)
#include <sys/sysctl.h>
#endif
#if defined(__myriad2__)
#include <rtems.h>
#endif
#include <string.h>
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <ctime>
#include <limits>
#include <thread> // NOLINT(build/c++11)
#include <utility>
#include <vector>
#include "absl/base/call_once.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/spinlock.h"
#include "absl/base/internal/unscaledcycleclock.h"
namespace absl {
inline namespace lts_2019_08_08 {
namespace base_internal {
static once_flag init_system_info_once;
static int num_cpus = 0;
static double nominal_cpu_frequency = 1.0; // 0.0 might be dangerous.
static int GetNumCPUs() {
#if defined(__myriad2__)
return 1;
#else
// Other possibilities:
// - Read /sys/devices/system/cpu/online and use cpumask_parse()
// - sysconf(_SC_NPROCESSORS_ONLN)
return std::thread::hardware_concurrency();
#endif
}
#if defined(_WIN32)
static double GetNominalCPUFrequency() {
DWORD data;
DWORD data_size = sizeof(data);
#pragma comment(lib, "shlwapi.lib") // For SHGetValue().
if (SUCCEEDED(
SHGetValueA(HKEY_LOCAL_MACHINE,
"HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0",
"~MHz", nullptr, &data, &data_size))) {
return data * 1e6; // Value is MHz.
}
return 1.0;
}
#elif defined(CTL_HW) && defined(HW_CPU_FREQ)
static double GetNominalCPUFrequency() {
unsigned freq;
size_t size = sizeof(freq);
int mib[2] = {CTL_HW, HW_CPU_FREQ};
if (sysctl(mib, 2, &freq, &size, nullptr, 0) == 0) {
return static_cast<double>(freq);
}
return 1.0;
}
#else
// Helper function for reading a long from a file. Returns true if successful
// and the memory location pointed to by value is set to the value read.
static bool ReadLongFromFile(const char *file, long *value) {
bool ret = false;
int fd = open(file, O_RDONLY);
if (fd != -1) {
char line[1024];
char *err;
memset(line, '\0', sizeof(line));
int len = read(fd, line, sizeof(line) - 1);
if (len <= 0) {
ret = false;
} else {
const long temp_value = strtol(line, &err, 10);
if (line[0] != '\0' && (*err == '\n' || *err == '\0')) {
*value = temp_value;
ret = true;
}
}
close(fd);
}
return ret;
}
#if defined(ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY)
// Reads a monotonic time source and returns a value in
// nanoseconds. The returned value uses an arbitrary epoch, not the
// Unix epoch.
static int64_t ReadMonotonicClockNanos() {
struct timespec t;
#ifdef CLOCK_MONOTONIC_RAW
int rc = clock_gettime(CLOCK_MONOTONIC_RAW, &t);
#else
int rc = clock_gettime(CLOCK_MONOTONIC, &t);
#endif
if (rc != 0) {
perror("clock_gettime() failed");
abort();
}
return int64_t{t.tv_sec} * 1000000000 + t.tv_nsec;
}
class UnscaledCycleClockWrapperForInitializeFrequency {
public:
static int64_t Now() { return base_internal::UnscaledCycleClock::Now(); }
};
struct TimeTscPair {
int64_t time; // From ReadMonotonicClockNanos().
int64_t tsc; // From UnscaledCycleClock::Now().
};
// Returns a pair of values (monotonic kernel time, TSC ticks) that
// approximately correspond to each other. This is accomplished by
// doing several reads and picking the reading with the lowest
// latency. This approach is used to minimize the probability that
// our thread was preempted between clock reads.
static TimeTscPair GetTimeTscPair() {
int64_t best_latency = std::numeric_limits<int64_t>::max();
TimeTscPair best;
for (int i = 0; i < 10; ++i) {
int64_t t0 = ReadMonotonicClockNanos();
int64_t tsc = UnscaledCycleClockWrapperForInitializeFrequency::Now();
int64_t t1 = ReadMonotonicClockNanos();
int64_t latency = t1 - t0;
if (latency < best_latency) {
best_latency = latency;
best.time = t0;
best.tsc = tsc;
}
}
return best;
}
// Measures and returns the TSC frequency by taking a pair of
// measurements approximately `sleep_nanoseconds` apart.
static double MeasureTscFrequencyWithSleep(int sleep_nanoseconds) {
auto t0 = GetTimeTscPair();
struct timespec ts;
ts.tv_sec = 0;
ts.tv_nsec = sleep_nanoseconds;
while (nanosleep(&ts, &ts) != 0 && errno == EINTR) {}
auto t1 = GetTimeTscPair();
double elapsed_ticks = t1.tsc - t0.tsc;
double elapsed_time = (t1.time - t0.time) * 1e-9;
return elapsed_ticks / elapsed_time;
}
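// Worked example (illustrative numbers): if the two samples land 10ms apart
// in kernel time and the TSC advanced by 30,000,000 ticks in between, the
// estimate is 3e7 / 0.01 = 3.0e9 ticks/second, i.e. a 3.0 GHz TSC.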
// Measures and returns the TSC frequency by calling
// MeasureTscFrequencyWithSleep(), doubling the sleep interval until the
// frequency measurement stabilizes.
static double MeasureTscFrequency() {
double last_measurement = -1.0;
int sleep_nanoseconds = 1000000; // 1 millisecond.
for (int i = 0; i < 8; ++i) {
double measurement = MeasureTscFrequencyWithSleep(sleep_nanoseconds);
if (measurement * 0.99 < last_measurement &&
last_measurement < measurement * 1.01) {
// Use the current measurement if it is within 1% of the
// previous measurement.
return measurement;
}
last_measurement = measurement;
sleep_nanoseconds *= 2;
}
return last_measurement;
}
#endif // ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
static double GetNominalCPUFrequency() {
long freq = 0;
// Google's production kernel has a patch to export the TSC
// frequency through sysfs. If the kernel is exporting the TSC
// frequency use that. There are issues where cpuinfo_max_freq
// cannot be relied on because the BIOS may be exporting an invalid
// p-state (on x86) or p-states may be used to put the processor in
// a new mode (turbo mode). Essentially, those frequencies cannot
// always be relied upon. The same reasons apply to /proc/cpuinfo as
// well.
if (ReadLongFromFile("/sys/devices/system/cpu/cpu0/tsc_freq_khz", &freq)) {
return freq * 1e3; // Value is kHz.
}
#if defined(ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY)
// On these platforms, the TSC frequency is the nominal CPU
// frequency. But without having the kernel export it directly
// through /sys/devices/system/cpu/cpu0/tsc_freq_khz, there is no
// other way to reliably get the TSC frequency, so we have to
// measure it ourselves. Some CPUs abuse cpuinfo_max_freq by
// exporting "fake" frequencies for implementing new features. For
// example, Intel's turbo mode is enabled by exposing a p-state
// value with a higher frequency than that of the real TSC
// rate. Because of this, we prefer to measure the TSC rate
// ourselves on i386 and x86-64.
return MeasureTscFrequency();
#else
// If CPU scaling is in effect, we want to use the *maximum*
// frequency, not whatever CPU speed some random processor happens
// to be using now.
if (ReadLongFromFile("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq",
&freq)) {
return freq * 1e3; // Value is kHz.
}
return 1.0;
#endif // !ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
}
#endif
// InitializeSystemInfo() may be called before main() and before
// malloc is properly initialized, therefore this must not allocate
// memory.
static void InitializeSystemInfo() {
num_cpus = GetNumCPUs();
nominal_cpu_frequency = GetNominalCPUFrequency();
}
int NumCPUs() {
base_internal::LowLevelCallOnce(&init_system_info_once, InitializeSystemInfo);
return num_cpus;
}
double NominalCPUFrequency() {
base_internal::LowLevelCallOnce(&init_system_info_once, InitializeSystemInfo);
return nominal_cpu_frequency;
}
#if defined(_WIN32)
pid_t GetTID() {
return GetCurrentThreadId();
}
#elif defined(__linux__)
#ifndef SYS_gettid
#define SYS_gettid __NR_gettid
#endif
pid_t GetTID() {
return syscall(SYS_gettid);
}
#elif defined(__akaros__)
pid_t GetTID() {
// Akaros has a concept of "vcore context", which is the state the program
// is forced into when we need to make a user-level scheduling decision, or
// run a signal handler. This is analogous to the interrupt context that a
// CPU might enter if it encounters some kind of exception.
//
// There is no current thread context in vcore context, but we need to give
// a reasonable answer if asked for a thread ID (e.g., in a signal handler).
// Thread 0 always exists, so if we are in vcore context, we return that.
//
// Otherwise, we know (since we are using pthreads) that the uthread struct
// current_uthread is pointing to is the first element of a
// struct pthread_tcb, so we extract and return the thread ID from that.
//
// TODO(dcross): Akaros anticipates moving the thread ID to the uthread
// structure at some point. We should modify this code to remove the cast
// when that happens.
if (in_vcore_context())
return 0;
return reinterpret_cast<struct pthread_tcb *>(current_uthread)->id;
}
#elif defined(__myriad2__)
pid_t GetTID() {
uint32_t tid;
rtems_task_ident(RTEMS_SELF, 0, &tid);
return tid;
}
#else
// Fallback implementation of GetTID using pthread_getspecific.
static once_flag tid_once;
static pthread_key_t tid_key;
static absl::base_internal::SpinLock tid_lock(
absl::base_internal::kLinkerInitialized);
// We set a bit per thread in this array to indicate that an ID is in
// use. ID 0 is unused because it is the default value returned by
// pthread_getspecific().
static std::vector<uint32_t>* tid_array GUARDED_BY(tid_lock) = nullptr;
static constexpr int kBitsPerWord = 32; // tid_array is uint32_t.
// Returns the TID to tid_array.
static void FreeTID(void *v) {
intptr_t tid = reinterpret_cast<intptr_t>(v);
int word = tid / kBitsPerWord;
uint32_t mask = ~(1u << (tid % kBitsPerWord));
absl::base_internal::SpinLockHolder lock(&tid_lock);
assert(0 <= word && static_cast<size_t>(word) < tid_array->size());
(*tid_array)[word] &= mask;
}
static void InitGetTID() {
if (pthread_key_create(&tid_key, FreeTID) != 0) {
// The logging system calls GetTID() so it can't be used here.
perror("pthread_key_create failed");
abort();
}
// Initialize tid_array.
absl::base_internal::SpinLockHolder lock(&tid_lock);
tid_array = new std::vector<uint32_t>(1);
(*tid_array)[0] = 1; // ID 0 is never-allocated.
}
// Return a per-thread small integer ID from pthread's thread-specific data.
pid_t GetTID() {
absl::call_once(tid_once, InitGetTID);
intptr_t tid = reinterpret_cast<intptr_t>(pthread_getspecific(tid_key));
if (tid != 0) {
return tid;
}
int bit; // tid_array[word] = 1u << bit;
size_t word;
{
// Search for the first unused ID.
absl::base_internal::SpinLockHolder lock(&tid_lock);
// First search for a word in the array that is not all ones.
word = 0;
while (word < tid_array->size() && ~(*tid_array)[word] == 0) {
++word;
}
if (word == tid_array->size()) {
tid_array->push_back(0); // No space left, add kBitsPerWord more IDs.
}
// Search for a zero bit in the word.
bit = 0;
while (bit < kBitsPerWord && (((*tid_array)[word] >> bit) & 1) != 0) {
++bit;
}
tid = (word * kBitsPerWord) + bit;
(*tid_array)[word] |= 1u << bit; // Mark the TID as allocated.
}
if (pthread_setspecific(tid_key, reinterpret_cast<void *>(tid)) != 0) {
perror("pthread_setspecific failed");
abort();
}
return static_cast<pid_t>(tid);
}
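// Worked example (illustrative): tid 37 lives in word = 37 / 32 = 1 at
// bit = 37 % 32 = 5, so allocation sets (*tid_array)[1] |= 1u << 5 and
// FreeTID() later clears exactly that bit when the thread exits.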
#endif
} // namespace base_internal
} // inline namespace lts_2019_08_08
} // namespace absl

View File

@@ -0,0 +1,65 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// This file includes routines to find out characteristics
// of the machine a program is running on. It is undoubtedly
// system-dependent.
// Functions listed here that accept a pid_t as an argument act on the
// current process if the pid_t argument is 0.
// All functions here are thread-hostile due to file caching unless
// commented otherwise.
#ifndef ABSL_BASE_INTERNAL_SYSINFO_H_
#define ABSL_BASE_INTERNAL_SYSINFO_H_
#ifndef _WIN32
#include <sys/types.h>
#else
#include <intsafe.h>
#endif
#include "absl/base/port.h"
namespace absl {
inline namespace lts_2019_08_08 {
namespace base_internal {
// Nominal core processor cycles per second of each processor. This is _not_
// necessarily the frequency of the CycleClock counter (see cycleclock.h).
// Thread-safe.
double NominalCPUFrequency();
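// Example (an illustrative sketch): approximate nanoseconds per core cycle.
//
//   double ns_per_cycle = 1e9 / absl::base_internal::NominalCPUFrequency();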
// Number of logical processors (hyperthreads) in system. Thread-safe.
int NumCPUs();
// Return the thread id of the current thread, as told by the system.
// No two currently-live threads implemented by the OS shall have the same ID.
// Thread ids of exited threads may be reused. Multiple user-level threads
// may have the same thread ID if multiplexed on the same OS thread.
//
// On Linux, you may send a signal to the resulting ID with kill(). However,
// it is recommended for portability that you use pthread_kill() instead.
#ifdef _WIN32
// On Windows, process id and thread id share a representation: the return
// types of GetProcessId() and GetThreadId() are both DWORD.
using pid_t = DWORD;
#endif
pid_t GetTID();
} // namespace base_internal
} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_BASE_INTERNAL_SYSINFO_H_

View File

@@ -0,0 +1,100 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/base/internal/sysinfo.h"
#ifndef _WIN32
#include <sys/types.h>
#include <unistd.h>
#endif
#include <thread> // NOLINT(build/c++11)
#include <unordered_set>
#include <vector>
#include "gtest/gtest.h"
#include "absl/synchronization/barrier.h"
#include "absl/synchronization/mutex.h"
namespace absl {
inline namespace lts_2019_08_08 {
namespace base_internal {
namespace {
TEST(SysinfoTest, NumCPUs) {
EXPECT_NE(NumCPUs(), 0)
<< "NumCPUs() should not have the default value of 0";
}
TEST(SysinfoTest, NominalCPUFrequency) {
#if !(defined(__aarch64__) && defined(__linux__)) && !defined(__EMSCRIPTEN__)
EXPECT_GE(NominalCPUFrequency(), 1000.0)
<< "NominalCPUFrequency() did not return a reasonable value";
#else
// Aarch64 cannot read the CPU frequency from sysfs, so we get back 1.0.
// Emscripten does not have a sysfs to read from at all.
EXPECT_EQ(NominalCPUFrequency(), 1.0)
<< "CPU frequency detection was fixed! Please update unittest.";
#endif
}
TEST(SysinfoTest, GetTID) {
EXPECT_EQ(GetTID(), GetTID()); // Basic compile and equality test.
#ifdef __native_client__
// Native Client has a race condition bug that leads to memory
// exhaustion when repeatedly creating and joining threads.
// https://bugs.chromium.org/p/nativeclient/issues/detail?id=1027
return;
#endif
// Test that TIDs are unique to each thread.
// Uses a few loops to exercise implementations that reallocate IDs.
for (int i = 0; i < 32; ++i) {
constexpr int kNumThreads = 64;
Barrier all_threads_done(kNumThreads);
std::vector<std::thread> threads;
Mutex mutex;
std::unordered_set<pid_t> tids;
for (int j = 0; j < kNumThreads; ++j) {
threads.push_back(std::thread([&]() {
pid_t id = GetTID();
{
MutexLock lock(&mutex);
ASSERT_TRUE(tids.find(id) == tids.end());
tids.insert(id);
}
// We can't simply join the threads here. The threads need to
// be alive otherwise the TID might have been reallocated to
// another live thread.
all_threads_done.Block();
}));
}
for (auto& thread : threads) {
thread.join();
}
}
}
#ifdef __linux__
TEST(SysinfoTest, LinuxGetTID) {
// On Linux, for the main thread, GetTID()==getpid() is guaranteed by the API.
EXPECT_EQ(GetTID(), getpid());
}
#endif
} // namespace
} // namespace base_internal
} // inline namespace lts_2019_08_08
} // namespace absl

View File

@@ -0,0 +1,271 @@
// Copyright 2019 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// -----------------------------------------------------------------------------
// File: thread_annotations.h
// -----------------------------------------------------------------------------
//
// WARNING: This is a backwards compatible header and it will be removed after
// the migration to prefixed thread annotations is finished; please include
// "absl/base/thread_annotations.h".
//
// This header file contains macro definitions for thread safety annotations
// that allow developers to document the locking policies of multi-threaded
// code. The annotations can also help program analysis tools to identify
// potential thread safety issues.
//
// These annotations are implemented using compiler attributes. Using the macros
// defined here instead of raw attributes allows for portability and future
// compatibility.
//
// When referring to mutexes in the arguments of the attributes, you should
// use variable names or more complex expressions (e.g. my_object->mutex_)
// that evaluate to a concrete mutex object whenever possible. If the mutex
// you want to refer to is not in scope, you may use a member pointer
// (e.g. &MyClass::mutex_) to refer to a mutex in some (unknown) object.
#ifndef ABSL_BASE_INTERNAL_THREAD_ANNOTATIONS_H_
#define ABSL_BASE_INTERNAL_THREAD_ANNOTATIONS_H_
#if defined(__clang__)
#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
#else
#define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op
#endif
// GUARDED_BY()
//
// Documents if a shared field or global variable needs to be protected by a
// mutex. GUARDED_BY() allows the user to specify a particular mutex that
// should be held when accessing the annotated variable.
//
// Although this annotation (and PT_GUARDED_BY, below) cannot be applied to
// local variables, a local variable and its associated mutex can often be
// combined into a small class or struct, thereby allowing the annotation.
//
// Example:
//
// class Foo {
// Mutex mu_;
// int p1_ GUARDED_BY(mu_);
// ...
// };
#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x))
// PT_GUARDED_BY()
//
// Documents if the memory location pointed to by a pointer should be guarded
// by a mutex when dereferencing the pointer.
//
// Example:
// class Foo {
// Mutex mu_;
// int *p1_ PT_GUARDED_BY(mu_);
// ...
// };
//
// Note that a pointer variable to a shared memory location could itself be a
// shared variable.
//
// Example:
//
// // `q_`, guarded by `mu1_`, points to a shared memory location that is
// // guarded by `mu2_`:
// int *q_ GUARDED_BY(mu1_) PT_GUARDED_BY(mu2_);
#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x))
// ACQUIRED_AFTER() / ACQUIRED_BEFORE()
//
// Documents the acquisition order between locks that can be held
// simultaneously by a thread. For any two locks that need to be annotated
// to establish an acquisition order, only one of them needs the annotation.
// (i.e. You don't have to annotate both locks with both ACQUIRED_AFTER
// and ACQUIRED_BEFORE.)
//
// As with GUARDED_BY, this is only applicable to mutexes that are shared
// fields or global variables.
//
// Example:
//
// Mutex m1_;
// Mutex m2_ ACQUIRED_AFTER(m1_);
#define ACQUIRED_AFTER(...) \
THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__))
#define ACQUIRED_BEFORE(...) \
THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__))
// EXCLUSIVE_LOCKS_REQUIRED() / SHARED_LOCKS_REQUIRED()
//
// Documents a function that expects a mutex to be held prior to entry.
// The mutex is expected to be held both on entry to, and exit from, the
// function.
//
// An exclusive lock allows read-write access to the guarded data member(s), and
// only one thread can acquire a lock exclusively at any one time. A shared lock
// allows read-only access, and any number of threads can acquire a shared lock
// concurrently.
//
// Generally, non-const methods should be annotated with
// EXCLUSIVE_LOCKS_REQUIRED, while const methods should be annotated with
// SHARED_LOCKS_REQUIRED.
//
// Example:
//
// Mutex mu1, mu2;
// int a GUARDED_BY(mu1);
// int b GUARDED_BY(mu2);
//
// void foo() EXCLUSIVE_LOCKS_REQUIRED(mu1, mu2) { ... }
// void bar() const SHARED_LOCKS_REQUIRED(mu1, mu2) { ... }
#define EXCLUSIVE_LOCKS_REQUIRED(...) \
THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(__VA_ARGS__))
#define SHARED_LOCKS_REQUIRED(...) \
THREAD_ANNOTATION_ATTRIBUTE__(shared_locks_required(__VA_ARGS__))
// LOCKS_EXCLUDED()
//
// Documents the locks acquired in the body of the function. These locks
// cannot be held when calling this function (as Abseil's `Mutex` locks are
// non-reentrant).
#define LOCKS_EXCLUDED(...) \
THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__))
// LOCK_RETURNED()
//
// Documents a function that returns a mutex without acquiring it. For example,
// a public getter method that returns a pointer to a private mutex should
// be annotated with LOCK_RETURNED.
#define LOCK_RETURNED(x) \
THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
// LOCKABLE
//
// Documents if a class/type is a lockable type (such as the `Mutex` class).
#define LOCKABLE \
THREAD_ANNOTATION_ATTRIBUTE__(lockable)
// SCOPED_LOCKABLE
//
// Documents if a class does RAII locking (such as the `MutexLock` class).
// The constructor should use `LOCK_FUNCTION()` to specify the mutex that is
// acquired, and the destructor should use `UNLOCK_FUNCTION()` with no
// arguments; the analysis will assume that the destructor unlocks whatever the
// constructor locked.
#define SCOPED_LOCKABLE \
THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
// EXCLUSIVE_LOCK_FUNCTION()
//
// Documents functions that acquire a lock in the body of a function, and do
// not release it.
#define EXCLUSIVE_LOCK_FUNCTION(...) \
THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock_function(__VA_ARGS__))
// SHARED_LOCK_FUNCTION()
//
// Documents functions that acquire a shared (reader) lock in the body of a
// function, and do not release it.
#define SHARED_LOCK_FUNCTION(...) \
THREAD_ANNOTATION_ATTRIBUTE__(shared_lock_function(__VA_ARGS__))
// UNLOCK_FUNCTION()
//
// Documents functions that expect a lock to be held on entry to the function,
// and release it in the body of the function.
#define UNLOCK_FUNCTION(...) \
THREAD_ANNOTATION_ATTRIBUTE__(unlock_function(__VA_ARGS__))
// EXCLUSIVE_TRYLOCK_FUNCTION() / SHARED_TRYLOCK_FUNCTION()
//
// Documents functions that try to acquire a lock, and return success or failure
// (or a non-boolean value that can be interpreted as a boolean).
// The first argument should be `true` for functions that return `true` on
// success, or `false` for functions that return `false` on success. The second
// argument specifies the mutex that is locked on success. If unspecified, this
// mutex is assumed to be `this`.
#define EXCLUSIVE_TRYLOCK_FUNCTION(...) \
THREAD_ANNOTATION_ATTRIBUTE__(exclusive_trylock_function(__VA_ARGS__))
#define SHARED_TRYLOCK_FUNCTION(...) \
THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock_function(__VA_ARGS__))
// ASSERT_EXCLUSIVE_LOCK() / ASSERT_SHARED_LOCK()
//
// Documents functions that dynamically check to see if a lock is held, and fail
// if it is not held.
#define ASSERT_EXCLUSIVE_LOCK(...) \
THREAD_ANNOTATION_ATTRIBUTE__(assert_exclusive_lock(__VA_ARGS__))
#define ASSERT_SHARED_LOCK(...) \
THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_lock(__VA_ARGS__))
// NO_THREAD_SAFETY_ANALYSIS
//
// Turns off thread safety checking within the body of a particular function.
// This annotation is used to mark functions that are known to be correct, but
// the locking behavior is more complicated than the analyzer can handle.
#define NO_THREAD_SAFETY_ANALYSIS \
THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis)
//------------------------------------------------------------------------------
// Tool-Supplied Annotations
//------------------------------------------------------------------------------
// TS_UNCHECKED should be placed around lock expressions that are not valid
// C++ syntax, but which are present for documentation purposes. These
// annotations will be ignored by the analysis.
#define TS_UNCHECKED(x) ""
// TS_FIXME is used to mark lock expressions that are not valid C++ syntax.
// It is used by automated tools to mark and disable invalid expressions.
// The annotation should either be fixed, or changed to TS_UNCHECKED.
#define TS_FIXME(x) ""
// Like NO_THREAD_SAFETY_ANALYSIS, this turns off checking within the body of
// a particular function. However, this attribute is used to mark functions
// that are incorrect and need to be fixed. It is used by automated tools to
// avoid breaking the build when the analysis is updated.
// Code owners are expected to eventually fix the routine.
#define NO_THREAD_SAFETY_ANALYSIS_FIXME NO_THREAD_SAFETY_ANALYSIS
// Similar to NO_THREAD_SAFETY_ANALYSIS_FIXME, this macro marks a GUARDED_BY
// annotation that needs to be fixed, because it is producing a thread safety
// warning. It disables the GUARDED_BY.
#define GUARDED_BY_FIXME(x)
// Disables warnings for a single read operation. This can be used to avoid
// warnings when it is known that the read is not actually involved in a race,
// but the compiler cannot confirm that.
#define TS_UNCHECKED_READ(x) thread_safety_analysis::ts_unchecked_read(x)
namespace thread_safety_analysis {
// Takes a reference to a guarded data member, and returns an unguarded
// reference.
template <typename T>
inline const T& ts_unchecked_read(const T& v) NO_THREAD_SAFETY_ANALYSIS {
return v;
}
template <typename T>
inline T& ts_unchecked_read(T& v) NO_THREAD_SAFETY_ANALYSIS {
return v;
}
} // namespace thread_safety_analysis
#endif // ABSL_BASE_INTERNAL_THREAD_ANNOTATIONS_H_

View File

@@ -0,0 +1,135 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/base/internal/thread_identity.h"
#ifndef _WIN32
#include <pthread.h>
#include <signal.h>
#endif
#include <atomic>
#include <cassert>
#include <memory>
#include "absl/base/call_once.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/spinlock.h"
namespace absl {
inline namespace lts_2019_08_08 {
namespace base_internal {
#if ABSL_THREAD_IDENTITY_MODE != ABSL_THREAD_IDENTITY_MODE_USE_CPP11
namespace {
// Used to coordinate one-time creation of our pthread_key.
absl::once_flag init_thread_identity_key_once;
pthread_key_t thread_identity_pthread_key;
std::atomic<bool> pthread_key_initialized(false);
void AllocateThreadIdentityKey(ThreadIdentityReclaimerFunction reclaimer) {
pthread_key_create(&thread_identity_pthread_key, reclaimer);
pthread_key_initialized.store(true, std::memory_order_release);
}
} // namespace
#endif
#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \
ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
// The actual TLS storage for a thread's currently associated ThreadIdentity.
// This is referenced by inline accessors in the header.
// "protected" visibility ensures that if multiple instances of Abseil code
// exist within a process (via dlopen() or similar), references to
// thread_identity_ptr from each instance of the code will refer to
// *different* instances of this ptr.
#ifdef __GNUC__
__attribute__((visibility("protected")))
#endif // __GNUC__
ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity* thread_identity_ptr;
#endif // TLS or CPP11
void SetCurrentThreadIdentity(
ThreadIdentity* identity, ThreadIdentityReclaimerFunction reclaimer) {
assert(CurrentThreadIdentityIfPresent() == nullptr);
// Associate our destructor.
// NOTE: This call to pthread_setspecific is currently the only immovable
// barrier to CurrentThreadIdentity() always being async signal safe.
#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
// NOTE: Not async-safe. But can be open-coded.
absl::call_once(init_thread_identity_key_once, AllocateThreadIdentityKey,
reclaimer);
#ifdef __EMSCRIPTEN__
// Emscripten PThread implementation does not support signals.
// See https://kripken.github.io/emscripten-site/docs/porting/pthreads.html
// for more information.
pthread_setspecific(thread_identity_pthread_key,
reinterpret_cast<void*>(identity));
#else
// We must mask signals around the call to setspecific as with current glibc,
// a concurrent getspecific (needed for GetCurrentThreadIdentityIfPresent())
// may zero our value.
//
// While not officially async-signal safe, getspecific within a signal handler
// is otherwise OK.
sigset_t all_signals;
sigset_t curr_signals;
sigfillset(&all_signals);
pthread_sigmask(SIG_SETMASK, &all_signals, &curr_signals);
pthread_setspecific(thread_identity_pthread_key,
reinterpret_cast<void*>(identity));
pthread_sigmask(SIG_SETMASK, &curr_signals, nullptr);
#endif // !__EMSCRIPTEN__
#elif ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS
// NOTE: Not async-safe. But can be open-coded.
absl::call_once(init_thread_identity_key_once, AllocateThreadIdentityKey,
reclaimer);
pthread_setspecific(thread_identity_pthread_key,
reinterpret_cast<void*>(identity));
thread_identity_ptr = identity;
#elif ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
thread_local std::unique_ptr<ThreadIdentity, ThreadIdentityReclaimerFunction>
holder(identity, reclaimer);
thread_identity_ptr = identity;
#else
#error Unimplemented ABSL_THREAD_IDENTITY_MODE
#endif
}
void ClearCurrentThreadIdentity() {
#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \
ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
thread_identity_ptr = nullptr;
#elif ABSL_THREAD_IDENTITY_MODE == \
ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
// pthread_setspecific is expected to clear the value on destruction
assert(CurrentThreadIdentityIfPresent() == nullptr);
#endif
}
#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
ThreadIdentity* CurrentThreadIdentityIfPresent() {
bool initialized = pthread_key_initialized.load(std::memory_order_acquire);
if (!initialized) {
return nullptr;
}
return reinterpret_cast<ThreadIdentity*>(
pthread_getspecific(thread_identity_pthread_key));
}
#endif
} // namespace base_internal
} // inline namespace lts_2019_08_08
} // namespace absl

View File

@@ -0,0 +1,243 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Each active thread has a ThreadIdentity that may represent the thread in
// various low-level interfaces. ThreadIdentity objects are never deallocated.
// When a thread terminates, its ThreadIdentity object may be reused for a
// thread created later.
#ifndef ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_
#define ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_
#ifndef _WIN32
#include <pthread.h>
// Defines __GOOGLE_GRTE_VERSION__ (via glibc-specific features.h) when
// supported.
#include <unistd.h>
#endif
#include <atomic>
#include <cstdint>
#include "absl/base/internal/per_thread_tls.h"
namespace absl {
inline namespace lts_2019_08_08 {
struct SynchLocksHeld;
struct SynchWaitParams;
namespace base_internal {
class SpinLock;
struct ThreadIdentity;
// Used by the implementation of absl::Mutex and absl::CondVar.
struct PerThreadSynch {
// The internal representation of absl::Mutex and absl::CondVar rely
// on the alignment of PerThreadSynch. Both store the address of the
// PerThreadSynch in the high-order bits of their internal state,
// which means the low kLowZeroBits of the address of PerThreadSynch
// must be zero.
static constexpr int kLowZeroBits = 8;
static constexpr int kAlignment = 1 << kLowZeroBits;
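// Editor's sketch (illustrative, not part of the original source): the
// alignment guarantee frees the low kLowZeroBits bits of a PerThreadSynch*
// to carry extra state, along the lines of:
//
//   intptr_t Pack(PerThreadSynch* s, intptr_t flags) {  // flags < kAlignment
//     return reinterpret_cast<intptr_t>(s) | flags;
//   }
//   PerThreadSynch* Unpack(intptr_t v) {
//     return reinterpret_cast<PerThreadSynch*>(v & ~(kAlignment - 1));
//   }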
// Returns the associated ThreadIdentity.
// This can be implemented as a cast because we guarantee
// PerThreadSynch is the first element of ThreadIdentity.
ThreadIdentity* thread_identity() {
return reinterpret_cast<ThreadIdentity*>(this);
}
PerThreadSynch *next; // Circular waiter queue; initialized to 0.
PerThreadSynch *skip; // If non-zero, all entries in Mutex queue
// up to and including "skip" have same
// condition as this, and will be woken later
bool may_skip; // if false while on mutex queue, a mutex unlocker
// is using this PerThreadSynch as a terminator. Its
// skip field must not be filled in because the loop
// might then skip over the terminator.
// The wait parameters of the current wait. waitp is null if the
// thread is not waiting. Transitions from null to non-null must
// occur before the enqueue commit point (state = kQueued in
// Enqueue() and CondVarEnqueue()). Transitions from non-null to
// null must occur after the wait is finished (state = kAvailable in
// Mutex::Block() and CondVar::WaitCommon()). This field may be
// changed only by the thread that describes this PerThreadSynch. A
// special case is Fer(), which calls Enqueue() on another thread,
// but with an identical SynchWaitParams pointer, thus leaving the
// pointer unchanged.
SynchWaitParams *waitp;
bool suppress_fatal_errors; // If true, try to proceed even in the face of
// broken invariants. This is used within fatal
// signal handlers to improve the chances of
// debug logging information being output
// successfully.
intptr_t readers; // Number of readers in mutex.
int priority; // Priority of thread (updated every so often).
// When priority will next be read (cycles).
int64_t next_priority_read_cycles;
// State values:
// kAvailable: This PerThreadSynch is available.
// kQueued: This PerThreadSynch is unavailable, it's currently queued on a
// Mutex or CondVar waitlist.
//
// Transitions from kQueued to kAvailable require a release
// barrier. This is needed as a waiter may use "state" to
// independently observe that it's no longer queued.
//
// Transitions from kAvailable to kQueued require no barrier, they
// are externally ordered by the Mutex.
enum State {
kAvailable,
kQueued
};
std::atomic<State> state;
bool maybe_unlocking; // Valid at head of Mutex waiter queue;
// true if UnlockSlow could be searching
// for a waiter to wake. Used for an optimization
// in Enqueue(). true is always a valid value.
// Can be reset to false when the unlocker or any
// writer releases the lock, or a reader fully releases
// the lock. It may not be set to false by a reader
// that decrements the count to non-zero.
// protected by mutex spinlock
bool wake; // This thread is to be woken from a Mutex.
// If "x" is on a waiter list for a mutex, "x->cond_waiter" is true iff the
// waiter is waiting on the mutex as part of a CV Wait or Mutex Await.
//
// The value of "x->cond_waiter" is meaningless if "x" is not on a
// Mutex waiter list.
bool cond_waiter;
// Locks held; used during deadlock detection.
// Allocated in Synch_GetAllLocks() and freed in ReclaimThreadIdentity().
SynchLocksHeld *all_locks;
};
struct ThreadIdentity {
// Must be the first member. The Mutex implementation requires that
// the PerThreadSynch object associated with each thread is
// PerThreadSynch::kAlignment aligned. We provide this alignment on
// ThreadIdentity itself.
PerThreadSynch per_thread_synch;
// Private: Reserved for absl::synchronization_internal::Waiter.
struct WaiterState {
char data[128];
} waiter_state;
// Used by PerThreadSem::{Get,Set}ThreadBlockedCounter().
std::atomic<int>* blocked_count_ptr;
// The following variables are mostly read/written just by the
// thread itself. The only exception is that these are read by
// a ticker thread as a hint.
std::atomic<int> ticker; // Tick counter, incremented once per second.
std::atomic<int> wait_start; // Ticker value when thread started waiting.
std::atomic<bool> is_idle; // Has thread become idle yet?
ThreadIdentity* next;
};
// Returns the ThreadIdentity object representing the calling thread; this is
// guaranteed to be unique for the thread's lifetime. The returned object
// remains valid for the program's lifetime, although it may be re-assigned to
// a subsequent thread. If no identity exists yet, returns nullptr instead.
//
// Does not malloc(*), and is async-signal safe.
// [*] Technically pthread_setspecific() does malloc on first use; however this
// is handled internally within tcmalloc's initialization already.
//
// New ThreadIdentity objects can be constructed and associated with a thread
// by calling GetOrCreateCurrentThreadIdentity() in per-thread-sem.h.
ThreadIdentity* CurrentThreadIdentityIfPresent();
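// Editor's sketch (hypothetical caller, not part of this header): typical use
// is a fast-path probe with a creation fallback, roughly what
// GetOrCreateCurrentThreadIdentity() in per-thread-sem.h provides:
//
//   ThreadIdentity* id = CurrentThreadIdentityIfPresent();
//   if (id == nullptr) {
//     id = synchronization_internal::GetOrCreateCurrentThreadIdentity();
//   }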
using ThreadIdentityReclaimerFunction = void (*)(void*);
// Sets the current thread identity to the given value. 'reclaimer' is a
// pointer to the global function for cleaning up instances on thread
// destruction.
void SetCurrentThreadIdentity(ThreadIdentity* identity,
ThreadIdentityReclaimerFunction reclaimer);
// Removes the currently associated ThreadIdentity from the running thread.
// This must be called from inside the ThreadIdentityReclaimerFunction, and only
// from that function.
void ClearCurrentThreadIdentity();
// May be chosen at compile time via: -DABSL_FORCE_THREAD_IDENTITY_MODE=<mode
// index>
#ifdef ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
#error ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC cannot be directly set
#else
#define ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC 0
#endif
#ifdef ABSL_THREAD_IDENTITY_MODE_USE_TLS
#error ABSL_THREAD_IDENTITY_MODE_USE_TLS cannot be directly set
#else
#define ABSL_THREAD_IDENTITY_MODE_USE_TLS 1
#endif
#ifdef ABSL_THREAD_IDENTITY_MODE_USE_CPP11
#error ABSL_THREAD_IDENTITY_MODE_USE_CPP11 cannot be directly set
#else
#define ABSL_THREAD_IDENTITY_MODE_USE_CPP11 2
#endif
#ifdef ABSL_THREAD_IDENTITY_MODE
#error ABSL_THREAD_IDENTITY_MODE cannot be directly set
#elif defined(ABSL_FORCE_THREAD_IDENTITY_MODE)
#define ABSL_THREAD_IDENTITY_MODE ABSL_FORCE_THREAD_IDENTITY_MODE
#elif defined(_WIN32)
#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11
#elif ABSL_PER_THREAD_TLS && defined(__GOOGLE_GRTE_VERSION__) && \
(__GOOGLE_GRTE_VERSION__ >= 20140228L)
// Support for async-safe TLS was specifically added in GRTEv4. It's not
// present in the upstream eglibc.
// Note: Current default for production systems.
#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_TLS
#else
#define ABSL_THREAD_IDENTITY_MODE \
ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
#endif
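// Editor's note (illustrative): per the comment above, a build can force a
// mode by index, e.g. -DABSL_FORCE_THREAD_IDENTITY_MODE=2 selects the C++11
// thread_local implementation on platforms where it is not the default.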
#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \
ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
extern ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity* thread_identity_ptr;
inline ThreadIdentity* CurrentThreadIdentityIfPresent() {
return thread_identity_ptr;
}
#elif ABSL_THREAD_IDENTITY_MODE != \
ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
#error Unknown ABSL_THREAD_IDENTITY_MODE
#endif
} // namespace base_internal
} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_

View File

@@ -0,0 +1,38 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "benchmark/benchmark.h"
#include "absl/base/internal/thread_identity.h"
#include "absl/synchronization/internal/create_thread_identity.h"
#include "absl/synchronization/internal/per_thread_sem.h"
namespace {
void BM_SafeCurrentThreadIdentity(benchmark::State& state) {
for (auto _ : state) {
benchmark::DoNotOptimize(
absl::synchronization_internal::GetOrCreateCurrentThreadIdentity());
}
}
BENCHMARK(BM_SafeCurrentThreadIdentity);
void BM_UnsafeCurrentThreadIdentity(benchmark::State& state) {
for (auto _ : state) {
benchmark::DoNotOptimize(
absl::base_internal::CurrentThreadIdentityIfPresent());
}
}
BENCHMARK(BM_UnsafeCurrentThreadIdentity);
} // namespace

View File

@@ -0,0 +1,128 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/base/internal/thread_identity.h"
#include <thread> // NOLINT(build/c++11)
#include <vector>
#include "gtest/gtest.h"
#include "absl/base/attributes.h"
#include "absl/base/internal/spinlock.h"
#include "absl/base/macros.h"
#include "absl/synchronization/internal/per_thread_sem.h"
#include "absl/synchronization/mutex.h"
namespace absl {
inline namespace lts_2019_08_08 {
namespace base_internal {
namespace {
// protects num_identities_reused
static absl::base_internal::SpinLock map_lock(
absl::base_internal::kLinkerInitialized);
static int num_identities_reused;
static const void* const kCheckNoIdentity = reinterpret_cast<void*>(1);
static void TestThreadIdentityCurrent(const void* assert_no_identity) {
ThreadIdentity* identity;
// We have to test this conditionally, because if the test framework relies
// on Abseil, then some previous action may have already allocated an
// identity.
if (assert_no_identity == kCheckNoIdentity) {
identity = CurrentThreadIdentityIfPresent();
EXPECT_TRUE(identity == nullptr);
}
identity = synchronization_internal::GetOrCreateCurrentThreadIdentity();
EXPECT_TRUE(identity != nullptr);
ThreadIdentity* identity_no_init;
identity_no_init = CurrentThreadIdentityIfPresent();
EXPECT_TRUE(identity == identity_no_init);
// Check that per_thread_synch is correctly aligned.
EXPECT_EQ(0, reinterpret_cast<intptr_t>(&identity->per_thread_synch) %
PerThreadSynch::kAlignment);
EXPECT_EQ(identity, identity->per_thread_synch.thread_identity());
absl::base_internal::SpinLockHolder l(&map_lock);
num_identities_reused++;
}
TEST(ThreadIdentityTest, BasicIdentityWorks) {
// This tests for the main() thread.
TestThreadIdentityCurrent(nullptr);
}
TEST(ThreadIdentityTest, BasicIdentityWorksThreaded) {
// Now try the same basic test with multiple threads being created and
// destroyed. This makes sure that:
// - New threads are created without a ThreadIdentity.
// - We re-allocate ThreadIdentity objects from the free-list.
// - If a thread implementation chooses to recycle threads, correct
//   re-initialization occurs.
static const int kNumLoops = 3;
static const int kNumThreads = 400;
for (int iter = 0; iter < kNumLoops; iter++) {
std::vector<std::thread> threads;
for (int i = 0; i < kNumThreads; ++i) {
threads.push_back(
std::thread(TestThreadIdentityCurrent, kCheckNoIdentity));
}
for (auto& thread : threads) {
thread.join();
}
}
// We should have recycled ThreadIdentity objects above; while (external)
// library threads allocating their own identities may preclude some
// reuse, we should have sufficient repetitions to exclude this.
EXPECT_LT(kNumThreads, num_identities_reused);
}
TEST(ThreadIdentityTest, ReusedThreadIdentityMutexTest) {
// This test repeatedly creates and joins a series of threads, each of
// which acquires and releases shared Mutex locks. This verifies
// Mutex operations work correctly under a reused
// ThreadIdentity. Note that the most likely failure mode of this
// test is a crash or deadlock.
static const int kNumLoops = 10;
static const int kNumThreads = 12;
static const int kNumMutexes = 3;
static const int kNumLockLoops = 5;
Mutex mutexes[kNumMutexes];
for (int iter = 0; iter < kNumLoops; ++iter) {
std::vector<std::thread> threads;
for (int thread = 0; thread < kNumThreads; ++thread) {
threads.push_back(std::thread([&]() {
for (int l = 0; l < kNumLockLoops; ++l) {
for (int m = 0; m < kNumMutexes; ++m) {
MutexLock lock(&mutexes[m]);
}
}
}));
}
for (auto& thread : threads) {
thread.join();
}
}
}
} // namespace
} // namespace base_internal
} // inline namespace lts_2019_08_08
} // namespace absl

View File

@@ -0,0 +1,108 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/base/internal/throw_delegate.h"
#include <cstdlib>
#include <functional>
#include <new>
#include <stdexcept>
#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h"
namespace absl {
inline namespace lts_2019_08_08 {
namespace base_internal {
namespace {
template <typename T>
[[noreturn]] void Throw(const T& error) {
#ifdef ABSL_HAVE_EXCEPTIONS
throw error;
#else
ABSL_RAW_LOG(FATAL, "%s", error.what());
std::abort();
#endif
}
} // namespace
void ThrowStdLogicError(const std::string& what_arg) {
Throw(std::logic_error(what_arg));
}
void ThrowStdLogicError(const char* what_arg) {
Throw(std::logic_error(what_arg));
}
void ThrowStdInvalidArgument(const std::string& what_arg) {
Throw(std::invalid_argument(what_arg));
}
void ThrowStdInvalidArgument(const char* what_arg) {
Throw(std::invalid_argument(what_arg));
}
void ThrowStdDomainError(const std::string& what_arg) {
Throw(std::domain_error(what_arg));
}
void ThrowStdDomainError(const char* what_arg) {
Throw(std::domain_error(what_arg));
}
void ThrowStdLengthError(const std::string& what_arg) {
Throw(std::length_error(what_arg));
}
void ThrowStdLengthError(const char* what_arg) {
Throw(std::length_error(what_arg));
}
void ThrowStdOutOfRange(const std::string& what_arg) {
Throw(std::out_of_range(what_arg));
}
void ThrowStdOutOfRange(const char* what_arg) {
Throw(std::out_of_range(what_arg));
}
void ThrowStdRuntimeError(const std::string& what_arg) {
Throw(std::runtime_error(what_arg));
}
void ThrowStdRuntimeError(const char* what_arg) {
Throw(std::runtime_error(what_arg));
}
void ThrowStdRangeError(const std::string& what_arg) {
Throw(std::range_error(what_arg));
}
void ThrowStdRangeError(const char* what_arg) {
Throw(std::range_error(what_arg));
}
void ThrowStdOverflowError(const std::string& what_arg) {
Throw(std::overflow_error(what_arg));
}
void ThrowStdOverflowError(const char* what_arg) {
Throw(std::overflow_error(what_arg));
}
void ThrowStdUnderflowError(const std::string& what_arg) {
Throw(std::underflow_error(what_arg));
}
void ThrowStdUnderflowError(const char* what_arg) {
Throw(std::underflow_error(what_arg));
}
void ThrowStdBadFunctionCall() { Throw(std::bad_function_call()); }
void ThrowStdBadAlloc() { Throw(std::bad_alloc()); }
} // namespace base_internal
} // inline namespace lts_2019_08_08
} // namespace absl

View File

@@ -0,0 +1,73 @@
//
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef ABSL_BASE_INTERNAL_THROW_DELEGATE_H_
#define ABSL_BASE_INTERNAL_THROW_DELEGATE_H_
#include <string>
namespace absl {
inline namespace lts_2019_08_08 {
namespace base_internal {
// Helper functions that allow throwing exceptions consistently from anywhere.
// The main use case is for header-based libraries (eg templates), as they will
// be built by many different targets with their own compiler options.
// In particular, this will allow a safe way to throw exceptions even if the
// caller is compiled with -fno-exceptions. This is intended for implementing
// things like map<>::at(), which the standard documents as throwing an
// exception on error.
//
// Using other techniques like #if tricks could lead to ODR violations.
//
// You shouldn't use it unless you're writing code that you know will be built
// both with and without exceptions and you need to conform to an interface
// that uses exceptions.
[[noreturn]] void ThrowStdLogicError(const std::string& what_arg);
[[noreturn]] void ThrowStdLogicError(const char* what_arg);
[[noreturn]] void ThrowStdInvalidArgument(const std::string& what_arg);
[[noreturn]] void ThrowStdInvalidArgument(const char* what_arg);
[[noreturn]] void ThrowStdDomainError(const std::string& what_arg);
[[noreturn]] void ThrowStdDomainError(const char* what_arg);
[[noreturn]] void ThrowStdLengthError(const std::string& what_arg);
[[noreturn]] void ThrowStdLengthError(const char* what_arg);
[[noreturn]] void ThrowStdOutOfRange(const std::string& what_arg);
[[noreturn]] void ThrowStdOutOfRange(const char* what_arg);
[[noreturn]] void ThrowStdRuntimeError(const std::string& what_arg);
[[noreturn]] void ThrowStdRuntimeError(const char* what_arg);
[[noreturn]] void ThrowStdRangeError(const std::string& what_arg);
[[noreturn]] void ThrowStdRangeError(const char* what_arg);
[[noreturn]] void ThrowStdOverflowError(const std::string& what_arg);
[[noreturn]] void ThrowStdOverflowError(const char* what_arg);
[[noreturn]] void ThrowStdUnderflowError(const std::string& what_arg);
[[noreturn]] void ThrowStdUnderflowError(const char* what_arg);
[[noreturn]] void ThrowStdBadFunctionCall();
[[noreturn]] void ThrowStdBadAlloc();
// ThrowStdBadArrayNewLength() cannot be consistently supported because
// std::bad_array_new_length is missing in libstdc++ until 4.9.0.
// https://gcc.gnu.org/onlinedocs/gcc-4.8.3/libstdc++/api/a01379_source.html
// https://gcc.gnu.org/onlinedocs/gcc-4.9.0/libstdc++/api/a01327_source.html
// libcxx (as of 3.2) and msvc (as of 2015) both have it.
// [[noreturn]] void ThrowStdBadArrayNewLength();
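// Editor's sketch (hypothetical caller, not part of this header): a
// header-only container built with or without -fno-exceptions can delegate
// its error paths here instead of using #ifdef tricks:
//
//   T& MyVector<T>::at(size_t i) {  // MyVector is illustrative only
//     if (i >= size_) absl::base_internal::ThrowStdOutOfRange("MyVector::at");
//     return data_[i];
//   }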
} // namespace base_internal
} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_BASE_INTERNAL_THROW_DELEGATE_H_

View File

@@ -0,0 +1,66 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// This file is intended solely for spinlock.h.
// It provides ThreadSanitizer annotations for custom mutexes.
// See <sanitizer/tsan_interface.h> for meaning of these annotations.
#ifndef ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_
#define ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_
// ABSL_INTERNAL_HAVE_TSAN_INTERFACE
// Macro intended only for internal use.
//
// Checks whether LLVM Thread Sanitizer interfaces are available.
// First made available in LLVM 5.0 (Sep 2017).
#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
#error "ABSL_INTERNAL_HAVE_TSAN_INTERFACE cannot be directly set."
#endif
#if defined(THREAD_SANITIZER) && defined(__has_include)
#if __has_include(<sanitizer/tsan_interface.h>)
#define ABSL_INTERNAL_HAVE_TSAN_INTERFACE 1
#endif
#endif
#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
#include <sanitizer/tsan_interface.h>
#define ABSL_TSAN_MUTEX_CREATE __tsan_mutex_create
#define ABSL_TSAN_MUTEX_DESTROY __tsan_mutex_destroy
#define ABSL_TSAN_MUTEX_PRE_LOCK __tsan_mutex_pre_lock
#define ABSL_TSAN_MUTEX_POST_LOCK __tsan_mutex_post_lock
#define ABSL_TSAN_MUTEX_PRE_UNLOCK __tsan_mutex_pre_unlock
#define ABSL_TSAN_MUTEX_POST_UNLOCK __tsan_mutex_post_unlock
#define ABSL_TSAN_MUTEX_PRE_SIGNAL __tsan_mutex_pre_signal
#define ABSL_TSAN_MUTEX_POST_SIGNAL __tsan_mutex_post_signal
#define ABSL_TSAN_MUTEX_PRE_DIVERT __tsan_mutex_pre_divert
#define ABSL_TSAN_MUTEX_POST_DIVERT __tsan_mutex_post_divert
#else
#define ABSL_TSAN_MUTEX_CREATE(...)
#define ABSL_TSAN_MUTEX_DESTROY(...)
#define ABSL_TSAN_MUTEX_PRE_LOCK(...)
#define ABSL_TSAN_MUTEX_POST_LOCK(...)
#define ABSL_TSAN_MUTEX_PRE_UNLOCK(...)
#define ABSL_TSAN_MUTEX_POST_UNLOCK(...)
#define ABSL_TSAN_MUTEX_PRE_SIGNAL(...)
#define ABSL_TSAN_MUTEX_POST_SIGNAL(...)
#define ABSL_TSAN_MUTEX_PRE_DIVERT(...)
#define ABSL_TSAN_MUTEX_POST_DIVERT(...)
#endif
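// Editor's sketch (illustrative, not from this header): a custom mutex
// typically brackets its operations with these annotations, e.g.
//
//   MyMutex::MyMutex() { ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static); }
//   void MyMutex::Lock() {
//     ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
//     // ... acquire the underlying lock ...
//     ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
//   }
//
// Under the no-TSAN fallback above, every annotation expands to nothing.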
#endif // ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_

View File

@@ -0,0 +1,156 @@
//
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_
#define ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_
#include <string.h>
#include <cstdint>
#include "absl/base/attributes.h"
// unaligned APIs
// Portable handling of unaligned loads, stores, and copies.
// The unaligned API is C++ only. The declarations use C++ features
// (namespaces, inline) which are absent or incompatible in C.
#if defined(__cplusplus)
#if defined(ADDRESS_SANITIZER) || defined(THREAD_SANITIZER) ||\
defined(MEMORY_SANITIZER)
// Suppose we have an unaligned load/store of 4 bytes from address 0x...05.
// AddressSanitizer will treat it as a 3-byte access to the range 05:07 and
// will miss a bug if 08 is the first unaddressable byte.
// ThreadSanitizer will also treat this as a 3-byte access to 05:07 and will
// miss a race between this access and some other accesses to 08.
// MemorySanitizer will correctly propagate the shadow on unaligned stores
// and correctly report bugs on unaligned loads, but it may not properly
// update and report the origin of the uninitialized memory.
// For all three tools, replacing an unaligned access with a tool-specific
// callback solves the problem.
// Make sure uint16_t/uint32_t/uint64_t are defined.
#include <stdint.h>
extern "C" {
uint16_t __sanitizer_unaligned_load16(const void *p);
uint32_t __sanitizer_unaligned_load32(const void *p);
uint64_t __sanitizer_unaligned_load64(const void *p);
void __sanitizer_unaligned_store16(void *p, uint16_t v);
void __sanitizer_unaligned_store32(void *p, uint32_t v);
void __sanitizer_unaligned_store64(void *p, uint64_t v);
} // extern "C"
namespace absl {
inline namespace lts_2019_08_08 {
namespace base_internal {
inline uint16_t UnalignedLoad16(const void *p) {
return __sanitizer_unaligned_load16(p);
}
inline uint32_t UnalignedLoad32(const void *p) {
return __sanitizer_unaligned_load32(p);
}
inline uint64_t UnalignedLoad64(const void *p) {
return __sanitizer_unaligned_load64(p);
}
inline void UnalignedStore16(void *p, uint16_t v) {
__sanitizer_unaligned_store16(p, v);
}
inline void UnalignedStore32(void *p, uint32_t v) {
__sanitizer_unaligned_store32(p, v);
}
inline void UnalignedStore64(void *p, uint64_t v) {
__sanitizer_unaligned_store64(p, v);
}
} // namespace base_internal
} // inline namespace lts_2019_08_08
} // namespace absl
#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) \
(absl::base_internal::UnalignedLoad16(_p))
#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) \
(absl::base_internal::UnalignedLoad32(_p))
#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) \
(absl::base_internal::UnalignedLoad64(_p))
#define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \
(absl::base_internal::UnalignedStore16(_p, _val))
#define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \
(absl::base_internal::UnalignedStore32(_p, _val))
#define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \
(absl::base_internal::UnalignedStore64(_p, _val))
#else
namespace absl {
inline namespace lts_2019_08_08 {
namespace base_internal {
inline uint16_t UnalignedLoad16(const void *p) {
uint16_t t;
memcpy(&t, p, sizeof t);
return t;
}
inline uint32_t UnalignedLoad32(const void *p) {
uint32_t t;
memcpy(&t, p, sizeof t);
return t;
}
inline uint64_t UnalignedLoad64(const void *p) {
uint64_t t;
memcpy(&t, p, sizeof t);
return t;
}
inline void UnalignedStore16(void *p, uint16_t v) { memcpy(p, &v, sizeof v); }
inline void UnalignedStore32(void *p, uint32_t v) { memcpy(p, &v, sizeof v); }
inline void UnalignedStore64(void *p, uint64_t v) { memcpy(p, &v, sizeof v); }
} // namespace base_internal
} // inline namespace lts_2019_08_08
} // namespace absl
#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) \
(absl::base_internal::UnalignedLoad16(_p))
#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) \
(absl::base_internal::UnalignedLoad32(_p))
#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) \
(absl::base_internal::UnalignedLoad64(_p))
#define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \
(absl::base_internal::UnalignedStore16(_p, _val))
#define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \
(absl::base_internal::UnalignedStore32(_p, _val))
#define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \
(absl::base_internal::UnalignedStore64(_p, _val))
#endif
#endif // defined(__cplusplus), end of unaligned API
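// Editor's sketch (hypothetical usage, not part of this header): reading a
// uint32_t from an arbitrary byte offset without alignment UB, regardless of
// which branch above was compiled in:
//
//   const unsigned char* p = buffer + 5;  // possibly misaligned
//   uint32_t len = ABSL_INTERNAL_UNALIGNED_LOAD32(p);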
#endif // ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_

View File

@@ -0,0 +1,103 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/base/internal/unscaledcycleclock.h"
#if ABSL_USE_UNSCALED_CYCLECLOCK
#if defined(_WIN32)
#include <intrin.h>
#endif
#if defined(__powerpc__) || defined(__ppc__)
#include <sys/platform/ppc.h>
#endif
#include "absl/base/internal/sysinfo.h"
namespace absl {
inline namespace lts_2019_08_08 {
namespace base_internal {
#if defined(__i386__)
int64_t UnscaledCycleClock::Now() {
int64_t ret;
__asm__ volatile("rdtsc" : "=A"(ret));
return ret;
}
double UnscaledCycleClock::Frequency() {
return base_internal::NominalCPUFrequency();
}
#elif defined(__x86_64__)
int64_t UnscaledCycleClock::Now() {
uint64_t low, high;
__asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
return (high << 32) | low;
}
double UnscaledCycleClock::Frequency() {
return base_internal::NominalCPUFrequency();
}
#elif defined(__powerpc__) || defined(__ppc__)
int64_t UnscaledCycleClock::Now() {
return __ppc_get_timebase();
}
double UnscaledCycleClock::Frequency() {
return __ppc_get_timebase_freq();
}
#elif defined(__aarch64__)
// The system timer of ARMv8 runs at a different frequency than the CPU's.
// The frequency is fixed, typically in the range 1-50 MHz. It can be
// read from the CNTFRQ special register. We assume the OS has set up
// the virtual timer properly.
int64_t UnscaledCycleClock::Now() {
int64_t virtual_timer_value;
asm volatile("mrs %0, cntvct_el0" : "=r"(virtual_timer_value));
return virtual_timer_value;
}
double UnscaledCycleClock::Frequency() {
uint64_t aarch64_timer_frequency;
asm volatile("mrs %0, cntfrq_el0" : "=r"(aarch64_timer_frequency));
return aarch64_timer_frequency;
}
#elif defined(_M_IX86) || defined(_M_X64)
#pragma intrinsic(__rdtsc)
int64_t UnscaledCycleClock::Now() {
return __rdtsc();
}
double UnscaledCycleClock::Frequency() {
return base_internal::NominalCPUFrequency();
}
#endif
} // namespace base_internal
} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_USE_UNSCALED_CYCLECLOCK

View File

@@ -0,0 +1,124 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// UnscaledCycleClock
// An UnscaledCycleClock yields the value and frequency of a cycle counter
// that increments at a rate that is approximately constant.
// This class is for internal / whitelisted use only; you should consider
// using CycleClock instead.
//
// Notes:
// The cycle counter frequency is not necessarily the core clock frequency.
// That is, CycleCounter cycles are not necessarily "CPU cycles".
//
// An arbitrary offset may have been added to the counter at power on.
//
// On some platforms, the rate and offset of the counter may differ
// slightly when read from different CPUs of a multiprocessor. Usually,
// we try to ensure that the operating system adjusts values periodically
// so that values agree approximately. If you need stronger guarantees,
// consider using alternate interfaces.
//
// The CPU is not required to maintain the ordering of a cycle counter read
// with respect to surrounding instructions.
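// Editor's sketch (illustrative, assumes a caller with friend access): elapsed
// time can be estimated from two readings, subject to the caveats above:
//
//   int64_t t0 = base_internal::UnscaledCycleClock::Now();
//   // ... work ...
//   double elapsed_sec = (base_internal::UnscaledCycleClock::Now() - t0) /
//                        base_internal::UnscaledCycleClock::Frequency();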
#ifndef ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_H_
#define ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_H_
#include <cstdint>
#if defined(__APPLE__)
#include <TargetConditionals.h>
#endif
#include "absl/base/port.h"
// The following platforms have an implementation of a hardware counter.
#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || \
defined(__powerpc__) || defined(__ppc__) || \
defined(_M_IX86) || defined(_M_X64)
#define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 1
#else
#define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 0
#endif
// The following platforms often disable access to the hardware
// counter (through a sandbox) even if the underlying hardware has a
// usable counter. The CycleTimer interface also requires a *scaled*
// CycleClock that runs at at least 1 MHz. We've found some Android
// ARM64 devices where this is not the case, so we disable it by
// default on Android ARM64.
#if defined(__native_client__) || \
(defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE) || \
(defined(__ANDROID__) && defined(__aarch64__))
#define ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT 0
#else
#define ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT 1
#endif
// UnscaledCycleClock is an optional internal feature.
// Use "#if ABSL_USE_UNSCALED_CYCLECLOCK" to test for its presence.
// Can be overridden at compile-time via -DABSL_USE_UNSCALED_CYCLECLOCK=0|1
#if !defined(ABSL_USE_UNSCALED_CYCLECLOCK)
#define ABSL_USE_UNSCALED_CYCLECLOCK \
(ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION && \
ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT)
#endif
#if ABSL_USE_UNSCALED_CYCLECLOCK
// This macro can be used to test if UnscaledCycleClock::Frequency()
// is NominalCPUFrequency() on a particular platform.
#if (defined(__i386__) || defined(__x86_64__) || \
defined(_M_IX86) || defined(_M_X64))
#define ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
#endif
namespace absl {
inline namespace lts_2019_08_08 {
namespace time_internal {
class UnscaledCycleClockWrapperForGetCurrentTime;
} // namespace time_internal
namespace base_internal {
class CycleClock;
class UnscaledCycleClockWrapperForInitializeFrequency;
class UnscaledCycleClock {
private:
UnscaledCycleClock() = delete;
// Returns the value of a cycle counter that counts at a rate that is
// approximately constant.
static int64_t Now();
// Returns how much UnscaledCycleClock::Now() increases per second.
// This is not necessarily the core CPU clock frequency.
// It may be the nominal value reported by the kernel, rather than a measured
// value.
static double Frequency();
// Whitelisted friends.
friend class base_internal::CycleClock;
friend class time_internal::UnscaledCycleClockWrapperForGetCurrentTime;
friend class base_internal::UnscaledCycleClockWrapperForInitializeFrequency;
};
} // namespace base_internal
} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_USE_UNSCALED_CYCLECLOCK
#endif // ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_H_

View File

@@ -0,0 +1,223 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/base/internal/invoke.h"
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
namespace absl {
inline namespace lts_2019_08_08 {
namespace base_internal {
namespace {
int Function(int a, int b) { return a - b; }
int Sink(std::unique_ptr<int> p) {
return *p;
}
std::unique_ptr<int> Factory(int n) {
return make_unique<int>(n);
}
void NoOp() {}
struct ConstFunctor {
int operator()(int a, int b) const { return a - b; }
};
struct MutableFunctor {
int operator()(int a, int b) { return a - b; }
};
struct EphemeralFunctor {
int operator()(int a, int b) && { return a - b; }
};
struct OverloadedFunctor {
template <typename... Args>
std::string operator()(const Args&... args) & {
return StrCat("&", args...);
}
template <typename... Args>
std::string operator()(const Args&... args) const& {
return StrCat("const&", args...);
}
template <typename... Args>
std::string operator()(const Args&... args) && {
return StrCat("&&", args...);
}
};
struct Class {
int Method(int a, int b) { return a - b; }
int ConstMethod(int a, int b) const { return a - b; }
int RefMethod(int a, int b) & { return a - b; }
int RefRefMethod(int a, int b) && { return a - b; }
int NoExceptMethod(int a, int b) noexcept { return a - b; }
int VolatileMethod(int a, int b) volatile { return a - b; }
int member;
};
struct FlipFlop {
int ConstMethod() const { return member; }
FlipFlop operator*() const { return {-member}; }
int member;
};
// CallMaybeWithArg(f) resolves either to Invoke(f) or Invoke(f, 42), depending
// on which one is valid.
template <typename F>
decltype(Invoke(std::declval<const F&>())) CallMaybeWithArg(const F& f) {
return Invoke(f);
}
template <typename F>
decltype(Invoke(std::declval<const F&>(), 42)) CallMaybeWithArg(const F& f) {
return Invoke(f, 42);
}
TEST(InvokeTest, Function) {
EXPECT_EQ(1, Invoke(Function, 3, 2));
EXPECT_EQ(1, Invoke(&Function, 3, 2));
}
TEST(InvokeTest, NonCopyableArgument) {
EXPECT_EQ(42, Invoke(Sink, make_unique<int>(42)));
}
TEST(InvokeTest, NonCopyableResult) {
EXPECT_THAT(Invoke(Factory, 42), ::testing::Pointee(42));
}
TEST(InvokeTest, VoidResult) {
Invoke(NoOp);
}
TEST(InvokeTest, ConstFunctor) {
EXPECT_EQ(1, Invoke(ConstFunctor(), 3, 2));
}
TEST(InvokeTest, MutableFunctor) {
MutableFunctor f;
EXPECT_EQ(1, Invoke(f, 3, 2));
EXPECT_EQ(1, Invoke(MutableFunctor(), 3, 2));
}
TEST(InvokeTest, EphemeralFunctor) {
EphemeralFunctor f;
EXPECT_EQ(1, Invoke(std::move(f), 3, 2));
EXPECT_EQ(1, Invoke(EphemeralFunctor(), 3, 2));
}
TEST(InvokeTest, OverloadedFunctor) {
OverloadedFunctor f;
const OverloadedFunctor& cf = f;
EXPECT_EQ("&", Invoke(f));
EXPECT_EQ("& 42", Invoke(f, " 42"));
EXPECT_EQ("const&", Invoke(cf));
EXPECT_EQ("const& 42", Invoke(cf, " 42"));
EXPECT_EQ("&&", Invoke(std::move(f)));
EXPECT_EQ("&& 42", Invoke(std::move(f), " 42"));
}
TEST(InvokeTest, ReferenceWrapper) {
ConstFunctor cf;
MutableFunctor mf;
EXPECT_EQ(1, Invoke(std::cref(cf), 3, 2));
EXPECT_EQ(1, Invoke(std::ref(cf), 3, 2));
EXPECT_EQ(1, Invoke(std::ref(mf), 3, 2));
}
TEST(InvokeTest, MemberFunction) {
std::unique_ptr<Class> p(new Class);
std::unique_ptr<const Class> cp(new Class);
std::unique_ptr<volatile Class> vp(new Class);
EXPECT_EQ(1, Invoke(&Class::Method, p, 3, 2));
EXPECT_EQ(1, Invoke(&Class::Method, p.get(), 3, 2));
EXPECT_EQ(1, Invoke(&Class::Method, *p, 3, 2));
EXPECT_EQ(1, Invoke(&Class::RefMethod, p, 3, 2));
EXPECT_EQ(1, Invoke(&Class::RefMethod, p.get(), 3, 2));
EXPECT_EQ(1, Invoke(&Class::RefMethod, *p, 3, 2));
EXPECT_EQ(1, Invoke(&Class::RefRefMethod, std::move(*p), 3, 2)); // NOLINT
EXPECT_EQ(1, Invoke(&Class::NoExceptMethod, p, 3, 2));
EXPECT_EQ(1, Invoke(&Class::NoExceptMethod, p.get(), 3, 2));
EXPECT_EQ(1, Invoke(&Class::NoExceptMethod, *p, 3, 2));
EXPECT_EQ(1, Invoke(&Class::ConstMethod, p, 3, 2));
EXPECT_EQ(1, Invoke(&Class::ConstMethod, p.get(), 3, 2));
EXPECT_EQ(1, Invoke(&Class::ConstMethod, *p, 3, 2));
EXPECT_EQ(1, Invoke(&Class::ConstMethod, cp, 3, 2));
EXPECT_EQ(1, Invoke(&Class::ConstMethod, cp.get(), 3, 2));
EXPECT_EQ(1, Invoke(&Class::ConstMethod, *cp, 3, 2));
EXPECT_EQ(1, Invoke(&Class::VolatileMethod, p, 3, 2));
EXPECT_EQ(1, Invoke(&Class::VolatileMethod, p.get(), 3, 2));
EXPECT_EQ(1, Invoke(&Class::VolatileMethod, *p, 3, 2));
EXPECT_EQ(1, Invoke(&Class::VolatileMethod, vp, 3, 2));
EXPECT_EQ(1, Invoke(&Class::VolatileMethod, vp.get(), 3, 2));
EXPECT_EQ(1, Invoke(&Class::VolatileMethod, *vp, 3, 2));
EXPECT_EQ(1, Invoke(&Class::Method, make_unique<Class>(), 3, 2));
EXPECT_EQ(1, Invoke(&Class::ConstMethod, make_unique<Class>(), 3, 2));
EXPECT_EQ(1, Invoke(&Class::ConstMethod, make_unique<const Class>(), 3, 2));
}
TEST(InvokeTest, DataMember) {
std::unique_ptr<Class> p(new Class{42});
std::unique_ptr<const Class> cp(new Class{42});
EXPECT_EQ(42, Invoke(&Class::member, p));
EXPECT_EQ(42, Invoke(&Class::member, *p));
EXPECT_EQ(42, Invoke(&Class::member, p.get()));
Invoke(&Class::member, p) = 42;
Invoke(&Class::member, p.get()) = 42;
EXPECT_EQ(42, Invoke(&Class::member, cp));
EXPECT_EQ(42, Invoke(&Class::member, *cp));
EXPECT_EQ(42, Invoke(&Class::member, cp.get()));
}
TEST(InvokeTest, FlipFlop) {
FlipFlop obj = {42};
// This call could resolve to (obj.*&FlipFlop::ConstMethod)() or
// ((*obj).*&FlipFlop::ConstMethod)(). We verify that it's the former.
EXPECT_EQ(42, Invoke(&FlipFlop::ConstMethod, obj));
EXPECT_EQ(42, Invoke(&FlipFlop::member, obj));
}
TEST(InvokeTest, SfinaeFriendly) {
CallMaybeWithArg(NoOp);
EXPECT_THAT(CallMaybeWithArg(Factory), ::testing::Pointee(42));
}
} // namespace
} // namespace base_internal
} // inline namespace lts_2019_08_08
} // namespace absl

View File

@@ -0,0 +1,27 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/base/log_severity.h"
#include <ostream>
namespace absl {
inline namespace lts_2019_08_08 {
std::ostream& operator<<(std::ostream& os, absl::LogSeverity s) {
if (s == absl::NormalizeLogSeverity(s)) return os << absl::LogSeverityName(s);
return os << "absl::LogSeverity(" << static_cast<int>(s) << ")";
}
} // inline namespace lts_2019_08_08
} // namespace absl

View File

@@ -0,0 +1,73 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef ABSL_BASE_INTERNAL_LOG_SEVERITY_H_
#define ABSL_BASE_INTERNAL_LOG_SEVERITY_H_
#include <array>
#include <ostream>
#include "absl/base/attributes.h"
namespace absl {
inline namespace lts_2019_08_08 {
// Four severity levels are defined. Logging APIs should terminate the program
// when a message is logged at severity `kFatal`; the other levels have no
// special semantics.
enum class LogSeverity : int {
kInfo = 0,
kWarning = 1,
kError = 2,
kFatal = 3,
};
// Returns an iterable of all standard `absl::LogSeverity` values, ordered from
// least to most severe.
constexpr std::array<absl::LogSeverity, 4> LogSeverities() {
return {{absl::LogSeverity::kInfo, absl::LogSeverity::kWarning,
absl::LogSeverity::kError, absl::LogSeverity::kFatal}};
}
// Returns the all-caps string representation (e.g. "INFO") of the specified
// severity level if it is one of the normal levels and "UNKNOWN" otherwise.
constexpr const char* LogSeverityName(absl::LogSeverity s) {
return s == absl::LogSeverity::kInfo
? "INFO"
: s == absl::LogSeverity::kWarning
? "WARNING"
: s == absl::LogSeverity::kError
? "ERROR"
: s == absl::LogSeverity::kFatal ? "FATAL" : "UNKNOWN";
}
// Values less than `kInfo` normalize to `kInfo`; values greater than `kFatal`
// normalize to `kError` (**NOT** `kFatal`).
constexpr absl::LogSeverity NormalizeLogSeverity(absl::LogSeverity s) {
return s < absl::LogSeverity::kInfo
? absl::LogSeverity::kInfo
: s > absl::LogSeverity::kFatal ? absl::LogSeverity::kError : s;
}
constexpr absl::LogSeverity NormalizeLogSeverity(int s) {
return NormalizeLogSeverity(static_cast<absl::LogSeverity>(s));
}
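// For example (editor's note; both hold at compile time since the functions
// are constexpr):
//
//   static_assert(absl::NormalizeLogSeverity(7) == absl::LogSeverity::kError, "");
//   static_assert(absl::NormalizeLogSeverity(-3) == absl::LogSeverity::kInfo, "");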
// The exact representation of a streamed `absl::LogSeverity` is deliberately
// unspecified; do not rely on it.
std::ostream& operator<<(std::ostream& os, absl::LogSeverity s);
} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_BASE_INTERNAL_LOG_SEVERITY_H_

View File

@@ -0,0 +1,43 @@
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/base/log_severity.h"
#include <sstream>
#include <string>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
namespace {
using testing::Eq;
std::string StreamHelper(absl::LogSeverity value) {
std::ostringstream stream;
stream << value;
return stream.str();
}
TEST(StreamTest, Works) {
EXPECT_THAT(StreamHelper(static_cast<absl::LogSeverity>(-100)),
Eq("absl::LogSeverity(-100)"));
EXPECT_THAT(StreamHelper(absl::LogSeverity::kInfo), Eq("INFO"));
EXPECT_THAT(StreamHelper(absl::LogSeverity::kWarning), Eq("WARNING"));
EXPECT_THAT(StreamHelper(absl::LogSeverity::kError), Eq("ERROR"));
EXPECT_THAT(StreamHelper(absl::LogSeverity::kFatal), Eq("FATAL"));
EXPECT_THAT(StreamHelper(static_cast<absl::LogSeverity>(4)),
Eq("absl::LogSeverity(4)"));
}
} // namespace

217
ubuntu/absl/base/macros.h Normal file
View File

@@ -0,0 +1,217 @@
//
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// -----------------------------------------------------------------------------
// File: macros.h
// -----------------------------------------------------------------------------
//
// This header file defines the set of language macros used within Abseil code.
// For the set of macros used to determine supported compilers and platforms,
// see absl/base/config.h instead.
//
// This code is compiled directly on many platforms, including client
// platforms like Windows, Mac, and embedded systems. Before making
// any changes here, make sure that you're not breaking any platforms.
#ifndef ABSL_BASE_MACROS_H_
#define ABSL_BASE_MACROS_H_
#include <cassert>
#include <cstddef>
#include "absl/base/optimization.h"
#include "absl/base/port.h"
// ABSL_ARRAYSIZE()
//
// Returns the number of elements in an array as a compile-time constant, which
// can be used in defining new arrays. If you use this macro on a pointer by
// mistake, you will get a compile-time error.
#define ABSL_ARRAYSIZE(array) \
(sizeof(::absl::macros_internal::ArraySizeHelper(array)))
namespace absl {
inline namespace lts_2019_08_08 {
namespace macros_internal {
// Note: this internal template function declaration is used by ABSL_ARRAYSIZE.
// The function doesn't need a definition, as we only use its type.
template <typename T, size_t N>
auto ArraySizeHelper(const T (&array)[N]) -> char (&)[N];
} // namespace macros_internal
} // inline namespace lts_2019_08_08
} // namespace absl
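// Editor's example (illustrative):
//
//   int primes[] = {2, 3, 5, 7, 11};
//   static_assert(ABSL_ARRAYSIZE(primes) == 5, "");
//   double inverses[ABSL_ARRAYSIZE(primes)];  // usable as an array bound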
// kLinkerInitialized
//
// An enum used only as a constructor argument to indicate that a variable has
// static storage duration, and that the constructor should do nothing to its
// state. Use of this macro indicates to the reader that it is legal to
// declare a static instance of the class, provided the constructor is given
// the absl::base_internal::kLinkerInitialized argument.
//
// Normally, it is unsafe to declare a static variable that has a constructor or
// a destructor because invocation order is undefined. However, if the type can
// be zero-initialized (which the loader does for static variables) into a valid
// state and the type's destructor does not affect storage, then a constructor
// for static initialization can be declared.
//
// Example:
// // Declaration
// explicit MyClass(absl::base_internal::LinkerInitialized x) {}
//
// // Invocation
// static MyClass my_global(absl::base_internal::kLinkerInitialized);
namespace absl {
inline namespace lts_2019_08_08 {
namespace base_internal {
enum LinkerInitialized {
kLinkerInitialized = 0,
};
} // namespace base_internal
} // inline namespace lts_2019_08_08
} // namespace absl
// ABSL_FALLTHROUGH_INTENDED
//
// Annotates implicit fall-through between switch labels, allowing a case to
// indicate intentional fallthrough and turn off warnings about any lack of a
// `break` statement. The ABSL_FALLTHROUGH_INTENDED macro should be followed by
// a semicolon and can be used in most places where `break` can, provided that
// no statements exist between it and the next switch label.
//
// Example:
//
// switch (x) {
// case 40:
// case 41:
// if (truth_is_out_there) {
// ++x;
// ABSL_FALLTHROUGH_INTENDED; // Use instead of/along with annotations
// // in comments
// } else {
// return x;
// }
// case 42:
// ...
//
// Notes: when compiled with clang in C++11 mode, the ABSL_FALLTHROUGH_INTENDED
// macro expands to the [[clang::fallthrough]] attribute, which is analyzed
// when performing the switch-label fall-through diagnostic
// (`-Wimplicit-fallthrough`). See the clang documentation on language
// extensions for details:
// http://clang.llvm.org/docs/AttributeReference.html#fallthrough-clang-fallthrough
//
// When used with unsupported compilers, the ABSL_FALLTHROUGH_INTENDED macro
// has no effect on diagnostics. In any case this macro has no effect on runtime
// behavior and performance of code.
#ifdef ABSL_FALLTHROUGH_INTENDED
#error "ABSL_FALLTHROUGH_INTENDED should not be defined."
#endif
// TODO(zhangxy): Use c++17 standard [[fallthrough]] macro, when supported.
#if defined(__clang__) && defined(__has_warning)
#if __has_feature(cxx_attributes) && __has_warning("-Wimplicit-fallthrough")
#define ABSL_FALLTHROUGH_INTENDED [[clang::fallthrough]]
#endif
#elif defined(__GNUC__) && __GNUC__ >= 7
#define ABSL_FALLTHROUGH_INTENDED [[gnu::fallthrough]]
#endif
#ifndef ABSL_FALLTHROUGH_INTENDED
#define ABSL_FALLTHROUGH_INTENDED \
do { \
} while (0)
#endif
// ABSL_DEPRECATED()
//
// Marks deprecated class, struct, enum, function, method, and variable
// declarations. The macro argument is used as a custom diagnostic message (e.g.
// suggestion of a better alternative).
//
// Example:
//
// class ABSL_DEPRECATED("Use Bar instead") Foo {...};
// ABSL_DEPRECATED("Use Baz instead") void Bar() {...}
//
// Every usage of a deprecated entity will trigger a warning when compiled with
// clang's `-Wdeprecated-declarations` option. This option is turned off by
// default, but the warnings will be reported by clang-tidy.
#if defined(__clang__) && __cplusplus >= 201103L
#define ABSL_DEPRECATED(message) __attribute__((deprecated(message)))
#endif
#ifndef ABSL_DEPRECATED
#define ABSL_DEPRECATED(message)
#endif
// ABSL_BAD_CALL_IF()
//
// Used on a function overload to trap bad calls: any call that matches the
// overload will cause a compile-time error. This macro uses a clang-specific
// "enable_if" attribute, as described at
// http://clang.llvm.org/docs/AttributeReference.html#enable-if
//
// Overloads which use this macro should be bracketed by
// `#ifdef ABSL_BAD_CALL_IF`.
//
// Example:
//
// int isdigit(int c);
// #ifdef ABSL_BAD_CALL_IF
// int isdigit(int c)
// ABSL_BAD_CALL_IF(c <= -1 || c > 255,
// "'c' must have the value of an unsigned char or EOF");
// #endif // ABSL_BAD_CALL_IF
#if defined(__clang__)
# if __has_attribute(enable_if)
# define ABSL_BAD_CALL_IF(expr, msg) \
__attribute__((enable_if(expr, "Bad call trap"), unavailable(msg)))
# endif
#endif
// ABSL_ASSERT()
//
// In C++11, `assert` can't be used portably within constexpr functions.
// ABSL_ASSERT functions as a runtime assert but works in C++11 constexpr
// functions. Example:
//
// constexpr double Divide(double a, double b) {
// return ABSL_ASSERT(b != 0), a / b;
// }
//
// This macro is inspired by
// https://akrzemi1.wordpress.com/2017/05/18/asserts-in-constexpr-functions/
#if defined(NDEBUG)
#define ABSL_ASSERT(expr) \
(false ? static_cast<void>(expr) : static_cast<void>(0))
#else
#define ABSL_ASSERT(expr) \
(ABSL_PREDICT_TRUE((expr)) ? static_cast<void>(0) \
: [] { assert(false && #expr); }()) // NOLINT
#endif
#ifdef ABSL_HAVE_EXCEPTIONS
#define ABSL_INTERNAL_TRY try
#define ABSL_INTERNAL_CATCH_ANY catch (...)
#define ABSL_INTERNAL_RETHROW do { throw; } while (false)
#else // ABSL_HAVE_EXCEPTIONS
#define ABSL_INTERNAL_TRY if (true)
#define ABSL_INTERNAL_CATCH_ANY else if (false)
#define ABSL_INTERNAL_RETHROW do {} while (false)
#endif // ABSL_HAVE_EXCEPTIONS
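// Editor's sketch (illustrative): these let shared code compile both with and
// without exception support:
//
//   ABSL_INTERNAL_TRY {
//     MayThrow();  // hypothetical callee
//   } ABSL_INTERNAL_CATCH_ANY {
//     Cleanup();   // hypothetical cleanup
//     ABSL_INTERNAL_RETHROW;
//   }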
#endif // ABSL_BASE_MACROS_H_

View File

@@ -0,0 +1,181 @@
//
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// -----------------------------------------------------------------------------
// File: optimization.h
// -----------------------------------------------------------------------------
//
// This header file defines portable macros for performance optimization.
#ifndef ABSL_BASE_OPTIMIZATION_H_
#define ABSL_BASE_OPTIMIZATION_H_
#include "absl/base/config.h"
// ABSL_BLOCK_TAIL_CALL_OPTIMIZATION
//
// Instructs the compiler to avoid optimizing tail-call recursion. Use of this
// macro is useful when you wish to preserve the existing function order within
// a stack trace for logging, debugging, or profiling purposes.
//
// Example:
//
// int f() {
// int result = g();
// ABSL_BLOCK_TAIL_CALL_OPTIMIZATION();
// return result;
// }
#if defined(__pnacl__)
#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() if (volatile int x = 0) { (void)x; }
#elif defined(__clang__)
// Clang will not tail call given inline volatile assembly.
#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() __asm__ __volatile__("")
#elif defined(__GNUC__)
// GCC will not tail call given inline volatile assembly.
#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() __asm__ __volatile__("")
#elif defined(_MSC_VER)
#include <intrin.h>
// The __nop() intrinsic blocks the optimization.
#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() __nop()
#else
#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() if (volatile int x = 0) { (void)x; }
#endif
// ABSL_CACHELINE_SIZE
//
// Explicitly defines the size of the L1 cache for purposes of alignment.
// Setting the cacheline size allows you to specify that certain objects be
// aligned on a cacheline boundary with `ABSL_CACHELINE_ALIGNED` declarations.
// (See below.)
//
// NOTE: this macro should be replaced with the following C++17 features, when
// those are generally available:
//
// * `std::hardware_constructive_interference_size`
// * `std::hardware_destructive_interference_size`
//
// See http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0154r1.html
// for more information.
#if defined(__GNUC__)
// Cache line alignment
#if defined(__i386__) || defined(__x86_64__)
#define ABSL_CACHELINE_SIZE 64
#elif defined(__powerpc64__)
#define ABSL_CACHELINE_SIZE 128
#elif defined(__aarch64__)
// We would need to read the special register ctr_el0 to find out the L1
// dcache size. This value is a good estimate based on real aarch64 machines.
#define ABSL_CACHELINE_SIZE 64
#elif defined(__arm__)
// Cache line sizes for ARM: These values are not strictly correct since
// cache line sizes depend on implementations, not architectures. There
// are even implementations with cache line sizes configurable at boot
// time.
#if defined(__ARM_ARCH_5T__)
#define ABSL_CACHELINE_SIZE 32
#elif defined(__ARM_ARCH_7A__)
#define ABSL_CACHELINE_SIZE 64
#endif
#endif
#ifndef ABSL_CACHELINE_SIZE
// A reasonable default guess. Note that overestimates tend to waste more
// space, while underestimates tend to waste more time.
#define ABSL_CACHELINE_SIZE 64
#endif
// ABSL_CACHELINE_ALIGNED
//
// Indicates that the declared object should be cache-aligned using
// `ABSL_CACHELINE_SIZE` (see above). Cacheline aligning objects allows you to
// load a set of related objects in the L1 cache for performance improvements.
// Cacheline aligning objects properly allows constructive memory sharing and
// prevents destructive (or "false") memory sharing.
//
// NOTE: this macro should be replaced with usage of `alignas()` using
// `std::hardware_constructive_interference_size` and/or
// `std::hardware_destructive_interference_size` when available within C++17.
//
// See http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0154r1.html
// for more information.
//
// On some compilers, `ABSL_CACHELINE_ALIGNED` expands to an `__attribute__`
// or `__declspec` attribute. For compilers where this is not known to work,
// the macro expands to nothing.
//
// No further guarantees are made here. The result of applying the macro
// to variables and types is always implementation-defined.
//
// WARNING: It is easy to use this attribute incorrectly, even to the point
// of causing hard-to-diagnose bugs or crashes. It does not by itself
// guarantee that objects are aligned to a cache line.
//
// NOTE: Some compilers are picky about the locations of annotations such as
// this attribute, so prefer to put it at the beginning of your declaration.
// For example,
//
// ABSL_CACHELINE_ALIGNED static Foo* foo = ...
//
// class ABSL_CACHELINE_ALIGNED Bar { ...
//
// Recommendations:
//
// 1) Consult compiler documentation; this comment is not kept in sync as
// toolchains evolve.
// 2) Verify your use has the intended effect. This often requires inspecting
// the generated machine code.
// 3) Prefer applying this attribute to individual variables. Avoid
// applying it to types. This tends to localize the effect.
#define ABSL_CACHELINE_ALIGNED __attribute__((aligned(ABSL_CACHELINE_SIZE)))
#elif defined(_MSC_VER)
#define ABSL_CACHELINE_SIZE 64
#define ABSL_CACHELINE_ALIGNED __declspec(align(ABSL_CACHELINE_SIZE))
#else
#define ABSL_CACHELINE_SIZE 64
#define ABSL_CACHELINE_ALIGNED
#endif
// ABSL_PREDICT_TRUE, ABSL_PREDICT_FALSE
//
// Tells the compiler which outcome of a boolean branch is likely, so that it
// can optimize code layout for the expected path.
//
// Example:
//
// if (ABSL_PREDICT_TRUE(expression)) {
// return result; // Faster if more likely
// } else {
// return 0;
// }
//
// Compilers can use the information that a certain branch is not likely to be
// taken (for instance, a CHECK failure) to optimize for the common case in
// the absence of better information (i.e. profile feedback, such as from
// compiling with gcc's `-fprofile-arcs`).
//
// Recommendation: Modern CPUs dynamically predict branch execution paths,
// typically with accuracy greater than 97%. As a result, annotating every
// branch in a codebase is likely counterproductive; however, annotating
// specific branches that are both hot and consistently mispredicted is likely
// to yield performance improvements.
#if ABSL_HAVE_BUILTIN(__builtin_expect) || \
(defined(__GNUC__) && !defined(__clang__))
#define ABSL_PREDICT_FALSE(x) (__builtin_expect(!!(x), 0))
#define ABSL_PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
#else
#define ABSL_PREDICT_FALSE(x) (x)
#define ABSL_PREDICT_TRUE(x) (x)
#endif
#endif // ABSL_BASE_OPTIMIZATION_H_

View File

@@ -0,0 +1,121 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// -----------------------------------------------------------------------------
// File: policy_checks.h
// -----------------------------------------------------------------------------
//
// This header enforces a minimum set of policies at build time, such as the
// supported compiler and library versions. Unsupported configurations are
// reported with `#error`. This enforcement is best effort, so successfully
// compiling this header does not guarantee a supported configuration.
#ifndef ABSL_BASE_POLICY_CHECKS_H_
#define ABSL_BASE_POLICY_CHECKS_H_
// Included for the __GLIBC_PREREQ macro used below.
#include <limits.h>
// Included for the _STLPORT_VERSION macro used below.
#if defined(__cplusplus)
#include <cstddef>
#endif
// -----------------------------------------------------------------------------
// Operating System Check
// -----------------------------------------------------------------------------
#if defined(__CYGWIN__)
#error "Cygwin is not supported."
#endif
// -----------------------------------------------------------------------------
// Compiler Check
// -----------------------------------------------------------------------------
// We support MSVC++ 14.0 update 2 and later.
// This minimum will go up.
#if defined(_MSC_FULL_VER) && _MSC_FULL_VER < 190023918 && !defined(__clang__)
#error "This package requires Visual Studio 2015 Update 2 or higher."
#endif
// We support gcc 4.7 and later.
// This minimum will go up.
#if defined(__GNUC__) && !defined(__clang__)
#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 7)
#error "This package requires gcc 4.7 or higher."
#endif
#endif
// We support Apple Xcode clang 4.2.1 (version 421.11.65) and later.
// This corresponds to Apple Xcode version 4.5.
// This minimum will go up.
#if defined(__apple_build_version__) && __apple_build_version__ < 4211165
#error "This package requires __apple_build_version__ of 4211165 or higher."
#endif
// -----------------------------------------------------------------------------
// C++ Version Check
// -----------------------------------------------------------------------------
// Enforce C++11 as the minimum. Note that Visual Studio has not
// advanced __cplusplus despite being good enough for our purposes, so
// we exempt it from the check.
#if defined(__cplusplus) && !defined(_MSC_VER)
#if __cplusplus < 201103L
#error "C++ versions less than C++11 are not supported."
#endif
#endif
// -----------------------------------------------------------------------------
// Standard Library Check
// -----------------------------------------------------------------------------
// We have chosen glibc 2.12 as the minimum as it was tagged for release
// in May, 2010 and includes some functionality used in Google software
// (for instance pthread_setname_np):
// https://sourceware.org/ml/libc-alpha/2010-05/msg00000.html
#if defined(__GLIBC__) && defined(__GLIBC_PREREQ)
#if !__GLIBC_PREREQ(2, 12)
#error "Minimum required version of glibc is 2.12."
#endif
#endif
#if defined(_STLPORT_VERSION)
#error "STLPort is not supported."
#endif
// -----------------------------------------------------------------------------
// `char` Size Check
// -----------------------------------------------------------------------------
// Abseil currently assumes CHAR_BIT == 8. If you would like to use Abseil on a
// platform where this is not the case, please provide us with the details about
// your platform so we can consider relaxing this requirement.
#if CHAR_BIT != 8
#error "Abseil assumes CHAR_BIT == 8."
#endif
// -----------------------------------------------------------------------------
// `int` Size Check
// -----------------------------------------------------------------------------
// Abseil currently assumes that an int is 4 bytes. If you would like to use
// Abseil on a platform where this is not the case, please provide us with the
// details about your platform so we can consider relaxing this requirement.
#if INT_MAX < 2147483647
#error "Abseil assumes that int is at least 4 bytes. "
#endif
#endif // ABSL_BASE_POLICY_CHECKS_H_

26
ubuntu/absl/base/port.h Normal file
View File

@@ -0,0 +1,26 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// This file is a forwarding header for other headers containing various
// portability macros and functions.
// This file is used for both C and C++!
#ifndef ABSL_BASE_PORT_H_
#define ABSL_BASE_PORT_H_
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/optimization.h"
#endif // ABSL_BASE_PORT_H_

View File

@@ -0,0 +1,79 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This test serves primarily as a compilation test for base/raw_logging.h.
// Raw logging testing is covered by logging_unittest.cc, which is not as
// portable as this test.
#include "absl/base/internal/raw_logging.h"
#include <tuple>
#include "gtest/gtest.h"
#include "absl/strings/str_cat.h"
namespace {
TEST(RawLoggingCompilationTest, Log) {
ABSL_RAW_LOG(INFO, "RAW INFO: %d", 1);
ABSL_RAW_LOG(INFO, "RAW INFO: %d %d", 1, 2);
ABSL_RAW_LOG(INFO, "RAW INFO: %d %d %d", 1, 2, 3);
ABSL_RAW_LOG(INFO, "RAW INFO: %d %d %d %d", 1, 2, 3, 4);
ABSL_RAW_LOG(INFO, "RAW INFO: %d %d %d %d %d", 1, 2, 3, 4, 5);
ABSL_RAW_LOG(WARNING, "RAW WARNING: %d", 1);
ABSL_RAW_LOG(ERROR, "RAW ERROR: %d", 1);
}
TEST(RawLoggingCompilationTest, PassingCheck) {
ABSL_RAW_CHECK(true, "RAW CHECK");
}
// Not all platforms support output from raw log, so we don't verify any
// particular output for RAW check failures (expecting the empty string
// accomplishes this). This test is primarily a compilation test, but we also
// verify process death on platforms where EXPECT_DEATH is supported.
const char kExpectedDeathOutput[] = "";
TEST(RawLoggingDeathTest, FailingCheck) {
EXPECT_DEATH_IF_SUPPORTED(ABSL_RAW_CHECK(1 == 0, "explanation"),
kExpectedDeathOutput);
}
TEST(RawLoggingDeathTest, LogFatal) {
EXPECT_DEATH_IF_SUPPORTED(ABSL_RAW_LOG(FATAL, "my dog has fleas"),
kExpectedDeathOutput);
}
TEST(InternalLog, CompilationTest) {
ABSL_INTERNAL_LOG(INFO, "Internal Log");
std::string log_msg = "Internal Log";
ABSL_INTERNAL_LOG(INFO, log_msg);
ABSL_INTERNAL_LOG(INFO, log_msg + " 2");
float d = 1.1f;
ABSL_INTERNAL_LOG(INFO, absl::StrCat("Internal log ", 3, " + ", d));
}
TEST(InternalLogDeathTest, FailingCheck) {
EXPECT_DEATH_IF_SUPPORTED(ABSL_INTERNAL_CHECK(1 == 0, "explanation"),
kExpectedDeathOutput);
}
TEST(InternalLogDeathTest, LogFatal) {
EXPECT_DEATH_IF_SUPPORTED(ABSL_INTERNAL_LOG(FATAL, "my dog has fleas"),
kExpectedDeathOutput);
}
} // namespace

View File

@@ -0,0 +1,271 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// A bunch of threads repeatedly hash an array of ints protected by a
// spinlock. If the spinlock is working properly, all elements of the
// array should be equal at the end of the test.
#include <cstdint>
#include <limits>
#include <random>
#include <thread> // NOLINT(build/c++11)
#include <vector>
#include "gtest/gtest.h"
#include "absl/base/attributes.h"
#include "absl/base/internal/low_level_scheduling.h"
#include "absl/base/internal/scheduling_mode.h"
#include "absl/base/internal/spinlock.h"
#include "absl/base/internal/sysinfo.h"
#include "absl/base/macros.h"
#include "absl/synchronization/blocking_counter.h"
#include "absl/synchronization/notification.h"
constexpr int32_t kNumThreads = 10;
constexpr int32_t kIters = 1000;
namespace absl {
inline namespace lts_2019_08_08 {
namespace base_internal {
// This is defined outside of the anonymous namespace so that it can be
// a friend of SpinLock to access protected methods for testing.
struct SpinLockTest {
static uint32_t EncodeWaitCycles(int64_t wait_start_time,
int64_t wait_end_time) {
return SpinLock::EncodeWaitCycles(wait_start_time, wait_end_time);
}
static uint64_t DecodeWaitCycles(uint32_t lock_value) {
return SpinLock::DecodeWaitCycles(lock_value);
}
};
namespace {
static constexpr int kArrayLength = 10;
static uint32_t values[kArrayLength];
static SpinLock static_spinlock(base_internal::kLinkerInitialized);
static SpinLock static_cooperative_spinlock(
base_internal::kLinkerInitialized,
base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL);
static SpinLock static_noncooperative_spinlock(
base_internal::kLinkerInitialized, base_internal::SCHEDULE_KERNEL_ONLY);
// Simple integer hash function based on the public domain lookup2 hash.
// http://burtleburtle.net/bob/c/lookup2.c
static uint32_t Hash32(uint32_t a, uint32_t c) {
uint32_t b = 0x9e3779b9UL; // The golden ratio; an arbitrary value.
a -= b; a -= c; a ^= (c >> 13);
b -= c; b -= a; b ^= (a << 8);
c -= a; c -= b; c ^= (b >> 13);
a -= b; a -= c; a ^= (c >> 12);
b -= c; b -= a; b ^= (a << 16);
c -= a; c -= b; c ^= (b >> 5);
a -= b; a -= c; a ^= (c >> 3);
b -= c; b -= a; b ^= (a << 10);
c -= a; c -= b; c ^= (b >> 15);
return c;
}
static void TestFunction(int thread_salt, SpinLock* spinlock) {
for (int i = 0; i < kIters; i++) {
SpinLockHolder h(spinlock);
for (int j = 0; j < kArrayLength; j++) {
const int index = (j + thread_salt) % kArrayLength;
values[index] = Hash32(values[index], thread_salt);
std::this_thread::yield();
}
}
}
static void ThreadedTest(SpinLock* spinlock) {
std::vector<std::thread> threads;
for (int i = 0; i < kNumThreads; ++i) {
threads.push_back(std::thread(TestFunction, i, spinlock));
}
for (auto& thread : threads) {
thread.join();
}
SpinLockHolder h(spinlock);
for (int i = 1; i < kArrayLength; i++) {
EXPECT_EQ(values[0], values[i]);
}
}
TEST(SpinLock, StackNonCooperativeDisablesScheduling) {
SpinLock spinlock(base_internal::SCHEDULE_KERNEL_ONLY);
spinlock.Lock();
EXPECT_FALSE(base_internal::SchedulingGuard::ReschedulingIsAllowed());
spinlock.Unlock();
}
TEST(SpinLock, StaticNonCooperativeDisablesScheduling) {
static_noncooperative_spinlock.Lock();
EXPECT_FALSE(base_internal::SchedulingGuard::ReschedulingIsAllowed());
static_noncooperative_spinlock.Unlock();
}
TEST(SpinLock, WaitCyclesEncoding) {
// These are implementation details not exported by SpinLock.
const int kProfileTimestampShift = 7;
const int kLockwordReservedShift = 3;
const uint32_t kSpinLockSleeper = 8;
// We should be able to encode up to (2^kMaxCyclesShift - 1) without clamping,
// but the lower kProfileTimestampShift bits will be dropped.
const int kMaxCyclesShift =
32 - kLockwordReservedShift + kProfileTimestampShift;
const uint64_t kMaxCycles = (int64_t{1} << kMaxCyclesShift) - 1;
// These bits should be zero after encoding.
const uint32_t kLockwordReservedMask = (1 << kLockwordReservedShift) - 1;
// These bits are dropped when wait cycles are encoded.
const uint64_t kProfileTimestampMask = (1 << kProfileTimestampShift) - 1;
// Test a bunch of random values
std::default_random_engine generator;
// Shift to avoid overflow below.
std::uniform_int_distribution<uint64_t> time_distribution(
0, std::numeric_limits<uint64_t>::max() >> 4);
std::uniform_int_distribution<uint64_t> cycle_distribution(0, kMaxCycles);
for (int i = 0; i < 100; i++) {
int64_t start_time = time_distribution(generator);
int64_t cycles = cycle_distribution(generator);
int64_t end_time = start_time + cycles;
uint32_t lock_value = SpinLockTest::EncodeWaitCycles(start_time, end_time);
EXPECT_EQ(0, lock_value & kLockwordReservedMask);
uint64_t decoded = SpinLockTest::DecodeWaitCycles(lock_value);
EXPECT_EQ(0, decoded & kProfileTimestampMask);
EXPECT_EQ(cycles & ~kProfileTimestampMask, decoded);
}
// Test corner cases
int64_t start_time = time_distribution(generator);
EXPECT_EQ(kSpinLockSleeper,
SpinLockTest::EncodeWaitCycles(start_time, start_time));
EXPECT_EQ(0, SpinLockTest::DecodeWaitCycles(0));
EXPECT_EQ(0, SpinLockTest::DecodeWaitCycles(kLockwordReservedMask));
EXPECT_EQ(kMaxCycles & ~kProfileTimestampMask,
SpinLockTest::DecodeWaitCycles(~kLockwordReservedMask));
// Check that we cannot produce kSpinLockSleeper during encoding.
int64_t sleeper_cycles =
kSpinLockSleeper << (kProfileTimestampShift - kLockwordReservedShift);
uint32_t sleeper_value =
SpinLockTest::EncodeWaitCycles(start_time, start_time + sleeper_cycles);
EXPECT_NE(sleeper_value, kSpinLockSleeper);
// Test clamping
uint32_t max_value =
SpinLockTest::EncodeWaitCycles(start_time, start_time + kMaxCycles);
uint64_t max_value_decoded = SpinLockTest::DecodeWaitCycles(max_value);
uint64_t expected_max_value_decoded = kMaxCycles & ~kProfileTimestampMask;
EXPECT_EQ(expected_max_value_decoded, max_value_decoded);
const int64_t step = (1 << kProfileTimestampShift);
uint32_t after_max_value =
SpinLockTest::EncodeWaitCycles(start_time, start_time + kMaxCycles + step);
uint64_t after_max_value_decoded =
SpinLockTest::DecodeWaitCycles(after_max_value);
EXPECT_EQ(expected_max_value_decoded, after_max_value_decoded);
uint32_t before_max_value = SpinLockTest::EncodeWaitCycles(
start_time, start_time + kMaxCycles - step);
uint64_t before_max_value_decoded =
SpinLockTest::DecodeWaitCycles(before_max_value);
EXPECT_GT(expected_max_value_decoded, before_max_value_decoded);
}
TEST(SpinLockWithThreads, StaticSpinLock) {
ThreadedTest(&static_spinlock);
}
TEST(SpinLockWithThreads, StackSpinLock) {
SpinLock spinlock;
ThreadedTest(&spinlock);
}
TEST(SpinLockWithThreads, StackCooperativeSpinLock) {
SpinLock spinlock(base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL);
ThreadedTest(&spinlock);
}
TEST(SpinLockWithThreads, StackNonCooperativeSpinLock) {
SpinLock spinlock(base_internal::SCHEDULE_KERNEL_ONLY);
ThreadedTest(&spinlock);
}
TEST(SpinLockWithThreads, StaticCooperativeSpinLock) {
ThreadedTest(&static_cooperative_spinlock);
}
TEST(SpinLockWithThreads, StaticNonCooperativeSpinLock) {
ThreadedTest(&static_noncooperative_spinlock);
}
TEST(SpinLockWithThreads, DoesNotDeadlock) {
struct Helper {
static void NotifyThenLock(Notification* locked, SpinLock* spinlock,
BlockingCounter* b) {
locked->WaitForNotification(); // Wait for LockThenWait() to hold "s".
b->DecrementCount();
SpinLockHolder l(spinlock);
}
static void LockThenWait(Notification* locked, SpinLock* spinlock,
BlockingCounter* b) {
SpinLockHolder l(spinlock);
locked->Notify();
b->Wait();
}
static void DeadlockTest(SpinLock* spinlock, int num_spinners) {
Notification locked;
BlockingCounter counter(num_spinners);
std::vector<std::thread> threads;
threads.push_back(
std::thread(Helper::LockThenWait, &locked, spinlock, &counter));
for (int i = 0; i < num_spinners; ++i) {
threads.push_back(
std::thread(Helper::NotifyThenLock, &locked, spinlock, &counter));
}
for (auto& thread : threads) {
thread.join();
}
}
};
SpinLock stack_cooperative_spinlock(
base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL);
SpinLock stack_noncooperative_spinlock(base_internal::SCHEDULE_KERNEL_ONLY);
Helper::DeadlockTest(&stack_cooperative_spinlock,
base_internal::NumCPUs() * 2);
Helper::DeadlockTest(&stack_noncooperative_spinlock,
base_internal::NumCPUs() * 2);
Helper::DeadlockTest(&static_cooperative_spinlock,
base_internal::NumCPUs() * 2);
Helper::DeadlockTest(&static_noncooperative_spinlock,
base_internal::NumCPUs() * 2);
}
} // namespace
} // namespace base_internal
} // inline namespace lts_2019_08_08
} // namespace absl

View File

@@ -0,0 +1,279 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// -----------------------------------------------------------------------------
// File: thread_annotations.h
// -----------------------------------------------------------------------------
//
// This header file contains macro definitions for thread safety annotations
// that allow developers to document the locking policies of multi-threaded
// code. The annotations can also help program analysis tools to identify
// potential thread safety issues.
//
// These annotations are implemented using compiler attributes. Using the macros
// defined here instead of raw attributes allows for portability and future
// compatibility.
//
// When referring to mutexes in the arguments of the attributes, you should
// use variable names or more complex expressions (e.g. my_object->mutex_)
// that evaluate to a concrete mutex object whenever possible. If the mutex
// you want to refer to is not in scope, you may use a member pointer
// (e.g. &MyClass::mutex_) to refer to a mutex in some (unknown) object.
#ifndef ABSL_BASE_THREAD_ANNOTATIONS_H_
#define ABSL_BASE_THREAD_ANNOTATIONS_H_
// TODO(mbonadei): Remove after the backward compatibility period.
#include "absl/base/internal/thread_annotations.h" // IWYU pragma: export
#if defined(__clang__)
#define ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(x) __attribute__((x))
#else
#define ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(x) // no-op
#endif
// ABSL_GUARDED_BY()
//
// Documents if a shared field or global variable needs to be protected by a
// mutex. ABSL_GUARDED_BY() allows the user to specify a particular mutex that
// should be held when accessing the annotated variable.
//
// Although this annotation (and ABSL_PT_GUARDED_BY, below) cannot be applied to
// local variables, a local variable and its associated mutex can often be
// combined into a small class or struct, thereby allowing the annotation.
//
// Example:
//
// class Foo {
// Mutex mu_;
// int p1_ ABSL_GUARDED_BY(mu_);
// ...
// };
#define ABSL_GUARDED_BY(x) \
ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(guarded_by(x))
// ABSL_PT_GUARDED_BY()
//
// Documents if the memory location pointed to by a pointer should be guarded
// by a mutex when dereferencing the pointer.
//
// Example:
// class Foo {
// Mutex mu_;
// int *p1_ ABSL_PT_GUARDED_BY(mu_);
// ...
// };
//
// Note that a pointer variable to a shared memory location could itself be a
// shared variable.
//
// Example:
//
// // `q_`, guarded by `mu1_`, points to a shared memory location that is
// // guarded by `mu2_`:
// int *q_ ABSL_GUARDED_BY(mu1_) ABSL_PT_GUARDED_BY(mu2_);
#define ABSL_PT_GUARDED_BY(x) \
ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(pt_guarded_by(x))
// ABSL_ACQUIRED_AFTER() / ABSL_ACQUIRED_BEFORE()
//
// Documents the acquisition order between locks that can be held
// simultaneously by a thread. For any two locks that need to be annotated
// to establish an acquisition order, only one of them needs the annotation.
// (i.e. You don't have to annotate both locks with both ABSL_ACQUIRED_AFTER
// and ABSL_ACQUIRED_BEFORE.)
//
// As with ABSL_GUARDED_BY, this is only applicable to mutexes that are shared
// fields or global variables.
//
// Example:
//
// Mutex m1_;
// Mutex m2_ ABSL_ACQUIRED_AFTER(m1_);
#define ABSL_ACQUIRED_AFTER(...) \
ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(acquired_after(__VA_ARGS__))
#define ABSL_ACQUIRED_BEFORE(...) \
ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(acquired_before(__VA_ARGS__))
// ABSL_EXCLUSIVE_LOCKS_REQUIRED() / ABSL_SHARED_LOCKS_REQUIRED()
//
// Documents a function that expects a mutex to be held prior to entry.
// The mutex is expected to be held both on entry to, and exit from, the
// function.
//
// An exclusive lock allows read-write access to the guarded data member(s), and
// only one thread can acquire a lock exclusively at any one time. A shared lock
// allows read-only access, and any number of threads can acquire a shared lock
// concurrently.
//
// Generally, non-const methods should be annotated with
// ABSL_EXCLUSIVE_LOCKS_REQUIRED, while const methods should be annotated with
// ABSL_SHARED_LOCKS_REQUIRED.
//
// Example:
//
// Mutex mu1, mu2;
// int a ABSL_GUARDED_BY(mu1);
// int b ABSL_GUARDED_BY(mu2);
//
// void foo() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu1, mu2) { ... }
// void bar() const ABSL_SHARED_LOCKS_REQUIRED(mu1, mu2) { ... }
#define ABSL_EXCLUSIVE_LOCKS_REQUIRED(...) \
ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE( \
exclusive_locks_required(__VA_ARGS__))
#define ABSL_SHARED_LOCKS_REQUIRED(...) \
ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(shared_locks_required(__VA_ARGS__))
// ABSL_LOCKS_EXCLUDED()
//
// Documents the locks acquired in the body of the function. These locks
// cannot be held when calling this function (as Abseil's `Mutex` locks are
// non-reentrant).
#define ABSL_LOCKS_EXCLUDED(...) \
ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(locks_excluded(__VA_ARGS__))
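//
// A usage sketch (`Cache` is hypothetical; `mu_` is an absl::Mutex member):
//
//   class Cache {
//    public:
//     void Flush() ABSL_LOCKS_EXCLUDED(mu_) {
//       absl::MutexLock lock(&mu_);  // acquired in the body, so callers
//       ...                          // must not already hold `mu_`
//     }
//    private:
//     absl::Mutex mu_;
//   };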
// ABSL_LOCK_RETURNED()
//
// Documents a function that returns a mutex without acquiring it. For example,
// a public getter method that returns a pointer to a private mutex should
// be annotated with ABSL_LOCK_RETURNED.
#define ABSL_LOCK_RETURNED(x) \
ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(lock_returned(x))
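//
// A usage sketch (`Widget` is hypothetical):
//
//   class Widget {
//    public:
//     absl::Mutex* mu() ABSL_LOCK_RETURNED(mu_) { return &mu_; }
//    private:
//     absl::Mutex mu_;
//   };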
// ABSL_LOCKABLE
//
// Documents if a class/type is a lockable type (such as the `Mutex` class).
#define ABSL_LOCKABLE ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(lockable)
// ABSL_SCOPED_LOCKABLE
//
// Documents if a class does RAII locking (such as the `MutexLock` class).
// The constructor should use `ABSL_EXCLUSIVE_LOCK_FUNCTION()` to specify the
// mutex that is acquired, and the destructor should use
// `ABSL_UNLOCK_FUNCTION()` with no
// arguments; the analysis will assume that the destructor unlocks whatever the
// constructor locked.
#define ABSL_SCOPED_LOCKABLE \
ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(scoped_lockable)
// ABSL_EXCLUSIVE_LOCK_FUNCTION()
//
// Documents functions that acquire a lock in the body of a function, and do
// not release it.
#define ABSL_EXCLUSIVE_LOCK_FUNCTION(...) \
ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE( \
exclusive_lock_function(__VA_ARGS__))
// ABSL_SHARED_LOCK_FUNCTION()
//
// Documents functions that acquire a shared (reader) lock in the body of a
// function, and do not release it.
#define ABSL_SHARED_LOCK_FUNCTION(...) \
ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(shared_lock_function(__VA_ARGS__))
// ABSL_UNLOCK_FUNCTION()
//
// Documents functions that expect a lock to be held on entry to the function,
// and release it in the body of the function.
#define ABSL_UNLOCK_FUNCTION(...) \
ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(unlock_function(__VA_ARGS__))
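//
// These lock-function annotations compose with ABSL_SCOPED_LOCKABLE in an
// RAII guard. A minimal sketch of a `MutexLock`-style class (`MyMutexLock`
// is hypothetical):
//
//   class ABSL_SCOPED_LOCKABLE MyMutexLock {
//    public:
//     explicit MyMutexLock(absl::Mutex* mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
//         : mu_(mu) { mu_->Lock(); }
//     ~MyMutexLock() ABSL_UNLOCK_FUNCTION() { mu_->Unlock(); }
//    private:
//     absl::Mutex* const mu_;
//   };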
// ABSL_EXCLUSIVE_TRYLOCK_FUNCTION() / ABSL_SHARED_TRYLOCK_FUNCTION()
//
// Documents functions that try to acquire a lock, and return success or failure
// (or a non-boolean value that can be interpreted as a boolean).
// The first argument should be `true` for functions that return `true` on
// success, or `false` for functions that return `false` on success. The second
// argument specifies the mutex that is locked on success. If unspecified, this
// mutex is assumed to be `this`.
#define ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(...) \
ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE( \
exclusive_trylock_function(__VA_ARGS__))
#define ABSL_SHARED_TRYLOCK_FUNCTION(...) \
ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE( \
shared_trylock_function(__VA_ARGS__))
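//
// A usage sketch (`Registry` is hypothetical):
//
//   class Registry {
//    public:
//     bool MaybeLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true, mu_) {
//       return mu_.TryLock();  // returns true on success, as declared above
//     }
//    private:
//     absl::Mutex mu_;
//   };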
// ABSL_ASSERT_EXCLUSIVE_LOCK() / ABSL_ASSERT_SHARED_LOCK()
//
// Documents functions that dynamically check to see if a lock is held, and fail
// if it is not held.
#define ABSL_ASSERT_EXCLUSIVE_LOCK(...) \
ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(assert_exclusive_lock(__VA_ARGS__))
#define ABSL_ASSERT_SHARED_LOCK(...) \
ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(assert_shared_lock(__VA_ARGS__))
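//
// A usage sketch (`Counter` is hypothetical; absl::Mutex::AssertHeld() is
// annotated in a similar way):
//
//   class Counter {
//    public:
//     void AssertHeld() const ABSL_ASSERT_EXCLUSIVE_LOCK(mu_) {
//       mu_.AssertHeld();  // crashes if `mu_` is not held by this thread
//     }
//    private:
//     absl::Mutex mu_;
//   };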
// ABSL_NO_THREAD_SAFETY_ANALYSIS
//
// Turns off thread safety checking within the body of a particular function.
// This annotation is used to mark functions that are known to be correct, but
// the locking behavior is more complicated than the analyzer can handle.
#define ABSL_NO_THREAD_SAFETY_ANALYSIS \
ABSL_INTERNAL_THREAD_ANNOTATION_ATTRIBUTE(no_thread_safety_analysis)
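//
// A usage sketch (`Queue` and its methods are hypothetical; the lock is
// deliberately released in a different function, a pattern the analyzer
// cannot model):
//
//   void Queue::BeginTransaction() ABSL_NO_THREAD_SAFETY_ANALYSIS {
//     mu_.Lock();  // the matching Unlock() happens in EndTransaction()
//   }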
//------------------------------------------------------------------------------
// Tool-Supplied Annotations
//------------------------------------------------------------------------------
// ABSL_TS_UNCHECKED should be placed around lock expressions that are not valid
// C++ syntax, but which are present for documentation purposes. These
// annotations will be ignored by the analysis.
#define ABSL_TS_UNCHECKED(x) ""
// ABSL_TS_FIXME is used to mark lock expressions that are not valid C++ syntax.
// It is used by automated tools to mark and disable invalid expressions.
// The annotation should either be fixed, or changed to ABSL_TS_UNCHECKED.
#define ABSL_TS_FIXME(x) ""
// Like ABSL_NO_THREAD_SAFETY_ANALYSIS, this turns off checking within the body
// of a particular function. However, this attribute is used to mark functions
// that are incorrect and need to be fixed. It is used by automated tools to
// avoid breaking the build when the analysis is updated.
// Code owners are expected to eventually fix the routine.
#define ABSL_NO_THREAD_SAFETY_ANALYSIS_FIXME ABSL_NO_THREAD_SAFETY_ANALYSIS
// Similar to ABSL_NO_THREAD_SAFETY_ANALYSIS_FIXME, this macro marks an
// ABSL_GUARDED_BY annotation that needs to be fixed because it is producing a
// thread safety warning. It disables the ABSL_GUARDED_BY annotation.
#define ABSL_GUARDED_BY_FIXME(x)
// Disables warnings for a single read operation. This can be used to avoid
// warnings when it is known that the read is not actually involved in a race,
// but the compiler cannot confirm that.
#define ABSL_TS_UNCHECKED_READ(x) absl::base_internal::ts_unchecked_read(x)
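//
// A usage sketch (`hits_` is assumed to be ABSL_GUARDED_BY(mu_); a stale
// value is acceptable for statistics):
//
//   int ApproxHits() const { return ABSL_TS_UNCHECKED_READ(hits_); }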
namespace absl {
inline namespace lts_2019_08_08 {
namespace base_internal {
// Takes a reference to a guarded data member, and returns an unguarded
// reference.
// Do not use this function directly; use ABSL_TS_UNCHECKED_READ instead.
template <typename T>
inline const T& ts_unchecked_read(const T& v) ABSL_NO_THREAD_SAFETY_ANALYSIS {
return v;
}
template <typename T>
inline T& ts_unchecked_read(T& v) ABSL_NO_THREAD_SAFETY_ANALYSIS {
return v;
}
} // namespace base_internal
} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_BASE_THREAD_ANNOTATIONS_H_

View File

@@ -0,0 +1,94 @@
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/base/internal/throw_delegate.h"
#include <functional>
#include <new>
#include <stdexcept>
#include "gtest/gtest.h"
namespace {
using absl::base_internal::ThrowStdLogicError;
using absl::base_internal::ThrowStdInvalidArgument;
using absl::base_internal::ThrowStdDomainError;
using absl::base_internal::ThrowStdLengthError;
using absl::base_internal::ThrowStdOutOfRange;
using absl::base_internal::ThrowStdRuntimeError;
using absl::base_internal::ThrowStdRangeError;
using absl::base_internal::ThrowStdOverflowError;
using absl::base_internal::ThrowStdUnderflowError;
using absl::base_internal::ThrowStdBadFunctionCall;
using absl::base_internal::ThrowStdBadAlloc;
constexpr const char* what_arg = "The quick brown fox jumps over the lazy dog";
template <typename E>
void ExpectThrowChar(void (*f)(const char*)) {
try {
f(what_arg);
FAIL() << "Didn't throw";
} catch (const E& e) {
EXPECT_STREQ(e.what(), what_arg);
}
}
template <typename E>
void ExpectThrowString(void (*f)(const std::string&)) {
try {
f(what_arg);
FAIL() << "Didn't throw";
} catch (const E& e) {
EXPECT_STREQ(e.what(), what_arg);
}
}
template <typename E>
void ExpectThrowNoWhat(void (*f)()) {
try {
f();
FAIL() << "Didn't throw";
} catch (const E& e) {
}
}
TEST(ThrowHelper, Test) {
// Not using EXPECT_THROW because we want to check the .what() message too.
ExpectThrowChar<std::logic_error>(ThrowStdLogicError);
ExpectThrowChar<std::invalid_argument>(ThrowStdInvalidArgument);
ExpectThrowChar<std::domain_error>(ThrowStdDomainError);
ExpectThrowChar<std::length_error>(ThrowStdLengthError);
ExpectThrowChar<std::out_of_range>(ThrowStdOutOfRange);
ExpectThrowChar<std::runtime_error>(ThrowStdRuntimeError);
ExpectThrowChar<std::range_error>(ThrowStdRangeError);
ExpectThrowChar<std::overflow_error>(ThrowStdOverflowError);
ExpectThrowChar<std::underflow_error>(ThrowStdUnderflowError);
ExpectThrowString<std::logic_error>(ThrowStdLogicError);
ExpectThrowString<std::invalid_argument>(ThrowStdInvalidArgument);
ExpectThrowString<std::domain_error>(ThrowStdDomainError);
ExpectThrowString<std::length_error>(ThrowStdLengthError);
ExpectThrowString<std::out_of_range>(ThrowStdOutOfRange);
ExpectThrowString<std::runtime_error>(ThrowStdRuntimeError);
ExpectThrowString<std::range_error>(ThrowStdRangeError);
ExpectThrowString<std::overflow_error>(ThrowStdOverflowError);
ExpectThrowString<std::underflow_error>(ThrowStdUnderflowError);
ExpectThrowNoWhat<std::bad_function_call>(ThrowStdBadFunctionCall);
ExpectThrowNoWhat<std::bad_alloc>(ThrowStdBadAlloc);
}
} // namespace

View File

@@ -0,0 +1,38 @@
#
# Copyright 2018 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates config_setting that allows selecting based on 'compiler' value."""
def create_llvm_config(name, visibility):
# The "do_not_use_tools_cpp_compiler_present" attribute exists to
# distinguish between older versions of Bazel that do not support
# "@bazel_tools//tools/cpp:compiler" flag_value, and newer ones that do.
# In the future, the only way to select on the compiler will be through
# flag_values{"@bazel_tools//tools/cpp:compiler"} and the else branch can
# be removed.
if hasattr(cc_common, "do_not_use_tools_cpp_compiler_present"):
native.config_setting(
name = name,
flag_values = {
"@bazel_tools//tools/cpp:compiler": "llvm",
},
visibility = visibility,
)
else:
native.config_setting(
name = name,
values = {"compiler": "llvm"},
visibility = visibility,
)

View File

@@ -0,0 +1,827 @@
#
# Copyright 2017 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
load(
"//absl:copts/configure_copts.bzl",
"ABSL_DEFAULT_COPTS",
"ABSL_DEFAULT_LINKOPTS",
"ABSL_EXCEPTIONS_FLAG",
"ABSL_EXCEPTIONS_FLAG_LINKOPTS",
"ABSL_TEST_COPTS",
)
package(default_visibility = ["//visibility:public"])
licenses(["notice"]) # Apache 2.0
cc_library(
name = "compressed_tuple",
hdrs = ["internal/compressed_tuple.h"],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
"//absl/utility",
],
)
cc_test(
name = "compressed_tuple_test",
srcs = ["internal/compressed_tuple_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":compressed_tuple",
":test_instance_tracker",
"//absl/memory",
"//absl/types:any",
"//absl/types:optional",
"//absl/utility",
"@com_google_googletest//:gtest_main",
],
)
cc_library(
name = "fixed_array",
hdrs = ["fixed_array.h"],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":compressed_tuple",
"//absl/algorithm",
"//absl/base:core_headers",
"//absl/base:dynamic_annotations",
"//absl/base:throw_delegate",
"//absl/memory",
],
)
cc_test(
name = "fixed_array_test",
srcs = ["fixed_array_test.cc"],
copts = ABSL_TEST_COPTS + ABSL_EXCEPTIONS_FLAG,
linkopts = ABSL_EXCEPTIONS_FLAG_LINKOPTS + ABSL_DEFAULT_LINKOPTS,
deps = [
":fixed_array",
"//absl/base:exception_testing",
"//absl/hash:hash_testing",
"//absl/memory",
"@com_google_googletest//:gtest_main",
],
)
cc_test(
name = "fixed_array_test_noexceptions",
srcs = ["fixed_array_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":fixed_array",
"//absl/base:exception_testing",
"//absl/hash:hash_testing",
"//absl/memory",
"@com_google_googletest//:gtest_main",
],
)
cc_test(
name = "fixed_array_exception_safety_test",
srcs = ["fixed_array_exception_safety_test.cc"],
copts = ABSL_TEST_COPTS + ABSL_EXCEPTIONS_FLAG,
linkopts = ABSL_EXCEPTIONS_FLAG_LINKOPTS + ABSL_DEFAULT_LINKOPTS,
deps = [
":fixed_array",
"//absl/base:exception_safety_testing",
"@com_google_googletest//:gtest_main",
],
)
cc_test(
name = "fixed_array_benchmark",
srcs = ["fixed_array_benchmark.cc"],
copts = ABSL_TEST_COPTS + ["$(STACK_FRAME_UNLIMITED)"],
linkopts = ABSL_DEFAULT_LINKOPTS,
tags = ["benchmark"],
deps = [
":fixed_array",
"@com_github_google_benchmark//:benchmark_main",
],
)
cc_library(
name = "inlined_vector_internal",
hdrs = ["internal/inlined_vector.h"],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":compressed_tuple",
"//absl/base:core_headers",
"//absl/memory",
"//absl/meta:type_traits",
"//absl/types:span",
],
)
cc_library(
name = "inlined_vector",
hdrs = ["inlined_vector.h"],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":inlined_vector_internal",
"//absl/algorithm",
"//absl/base:core_headers",
"//absl/base:throw_delegate",
"//absl/memory",
],
)
cc_library(
name = "counting_allocator",
testonly = 1,
hdrs = ["internal/counting_allocator.h"],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
visibility = ["//visibility:private"],
)
cc_test(
name = "inlined_vector_test",
srcs = ["inlined_vector_test.cc"],
copts = ABSL_TEST_COPTS + ABSL_EXCEPTIONS_FLAG,
linkopts = ABSL_EXCEPTIONS_FLAG_LINKOPTS + ABSL_DEFAULT_LINKOPTS,
deps = [
":counting_allocator",
":inlined_vector",
":test_instance_tracker",
"//absl/base",
"//absl/base:core_headers",
"//absl/base:exception_testing",
"//absl/hash:hash_testing",
"//absl/memory",
"//absl/strings",
"@com_google_googletest//:gtest_main",
],
)
cc_test(
name = "inlined_vector_test_noexceptions",
srcs = ["inlined_vector_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":counting_allocator",
":inlined_vector",
":test_instance_tracker",
"//absl/base",
"//absl/base:core_headers",
"//absl/base:exception_testing",
"//absl/hash:hash_testing",
"//absl/memory",
"//absl/strings",
"@com_google_googletest//:gtest_main",
],
)
cc_test(
name = "inlined_vector_benchmark",
srcs = ["inlined_vector_benchmark.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
tags = ["benchmark"],
deps = [
":inlined_vector",
"//absl/base",
"//absl/base:core_headers",
"//absl/strings",
"@com_github_google_benchmark//:benchmark_main",
],
)
cc_test(
name = "inlined_vector_exception_safety_test",
srcs = ["inlined_vector_exception_safety_test.cc"],
copts = ABSL_TEST_COPTS + ABSL_EXCEPTIONS_FLAG,
deps = [
":inlined_vector",
"//absl/base:exception_safety_testing",
"@com_google_googletest//:gtest_main",
],
)
cc_library(
name = "test_instance_tracker",
testonly = 1,
srcs = ["internal/test_instance_tracker.cc"],
hdrs = ["internal/test_instance_tracker.h"],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
visibility = [
"//absl:__subpackages__",
],
deps = ["//absl/types:compare"],
)
cc_test(
name = "test_instance_tracker_test",
srcs = ["internal/test_instance_tracker_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":test_instance_tracker",
"@com_google_googletest//:gtest_main",
],
)
NOTEST_TAGS_NONMOBILE = [
"no_test_darwin_x86_64",
"no_test_loonix",
]
NOTEST_TAGS_MOBILE = [
"no_test_android_arm",
"no_test_android_arm64",
"no_test_android_x86",
"no_test_ios_x86_64",
]
NOTEST_TAGS = NOTEST_TAGS_MOBILE + NOTEST_TAGS_NONMOBILE
cc_library(
name = "flat_hash_map",
hdrs = ["flat_hash_map.h"],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":container_memory",
":hash_function_defaults",
":raw_hash_map",
"//absl/algorithm:container",
"//absl/memory",
],
)
cc_test(
name = "flat_hash_map_test",
srcs = ["flat_hash_map_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
tags = NOTEST_TAGS_NONMOBILE,
deps = [
":flat_hash_map",
":hash_generator_testing",
":unordered_map_constructor_test",
":unordered_map_lookup_test",
":unordered_map_members_test",
":unordered_map_modifiers_test",
"//absl/types:any",
"@com_google_googletest//:gtest_main",
],
)
cc_library(
name = "flat_hash_set",
hdrs = ["flat_hash_set.h"],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":container_memory",
":hash_function_defaults",
":raw_hash_set",
"//absl/algorithm:container",
"//absl/base:core_headers",
"//absl/memory",
],
)
cc_test(
name = "flat_hash_set_test",
srcs = ["flat_hash_set_test.cc"],
copts = ABSL_TEST_COPTS + ["-DUNORDERED_SET_CXX17"],
linkopts = ABSL_DEFAULT_LINKOPTS,
tags = NOTEST_TAGS_NONMOBILE,
deps = [
":flat_hash_set",
":hash_generator_testing",
":unordered_set_constructor_test",
":unordered_set_lookup_test",
":unordered_set_members_test",
":unordered_set_modifiers_test",
"//absl/memory",
"//absl/strings",
"@com_google_googletest//:gtest_main",
],
)
cc_library(
name = "node_hash_map",
hdrs = ["node_hash_map.h"],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":container_memory",
":hash_function_defaults",
":node_hash_policy",
":raw_hash_map",
"//absl/algorithm:container",
"//absl/memory",
],
)
cc_test(
name = "node_hash_map_test",
srcs = ["node_hash_map_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
tags = NOTEST_TAGS_NONMOBILE,
deps = [
":hash_generator_testing",
":node_hash_map",
":tracked",
":unordered_map_constructor_test",
":unordered_map_lookup_test",
":unordered_map_members_test",
":unordered_map_modifiers_test",
"@com_google_googletest//:gtest_main",
],
)
cc_library(
name = "node_hash_set",
hdrs = ["node_hash_set.h"],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":hash_function_defaults",
":node_hash_policy",
":raw_hash_set",
"//absl/algorithm:container",
"//absl/memory",
],
)
cc_test(
name = "node_hash_set_test",
srcs = ["node_hash_set_test.cc"],
copts = ABSL_TEST_COPTS + ["-DUNORDERED_SET_CXX17"],
linkopts = ABSL_DEFAULT_LINKOPTS,
tags = NOTEST_TAGS_NONMOBILE,
deps = [
":node_hash_set",
":unordered_set_constructor_test",
":unordered_set_lookup_test",
":unordered_set_members_test",
":unordered_set_modifiers_test",
"@com_google_googletest//:gtest_main",
],
)
cc_library(
name = "container_memory",
hdrs = ["internal/container_memory.h"],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
"//absl/memory",
"//absl/utility",
],
)
cc_test(
name = "container_memory_test",
srcs = ["internal/container_memory_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
tags = NOTEST_TAGS_NONMOBILE,
deps = [
":container_memory",
"//absl/strings",
"@com_google_googletest//:gtest_main",
],
)
cc_library(
name = "hash_function_defaults",
hdrs = ["internal/hash_function_defaults.h"],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
"//absl/base:config",
"//absl/hash",
"//absl/strings",
],
)
cc_test(
name = "hash_function_defaults_test",
srcs = ["internal/hash_function_defaults_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
tags = NOTEST_TAGS,
deps = [
":hash_function_defaults",
"//absl/hash",
"//absl/strings",
"@com_google_googletest//:gtest_main",
],
)
cc_library(
name = "hash_generator_testing",
testonly = 1,
srcs = ["internal/hash_generator_testing.cc"],
hdrs = ["internal/hash_generator_testing.h"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":hash_policy_testing",
"//absl/meta:type_traits",
"//absl/strings",
],
)
cc_library(
name = "hash_policy_testing",
testonly = 1,
hdrs = ["internal/hash_policy_testing.h"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
"//absl/hash",
"//absl/strings",
],
)
cc_test(
name = "hash_policy_testing_test",
srcs = ["internal/hash_policy_testing_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":hash_policy_testing",
"@com_google_googletest//:gtest_main",
],
)
cc_library(
name = "hash_policy_traits",
hdrs = ["internal/hash_policy_traits.h"],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = ["//absl/meta:type_traits"],
)
cc_test(
name = "hash_policy_traits_test",
srcs = ["internal/hash_policy_traits_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":hash_policy_traits",
"@com_google_googletest//:gtest_main",
],
)
cc_library(
name = "hashtable_debug",
hdrs = ["internal/hashtable_debug.h"],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":hashtable_debug_hooks",
],
)
cc_library(
name = "hashtable_debug_hooks",
hdrs = ["internal/hashtable_debug_hooks.h"],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
)
cc_library(
name = "hashtablez_sampler",
srcs = [
"internal/hashtablez_sampler.cc",
"internal/hashtablez_sampler_force_weak_definition.cc",
],
hdrs = ["internal/hashtablez_sampler.h"],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":have_sse",
"//absl/base",
"//absl/base:core_headers",
"//absl/debugging:stacktrace",
"//absl/memory",
"//absl/synchronization",
"//absl/utility",
],
)
cc_test(
name = "hashtablez_sampler_test",
srcs = ["internal/hashtablez_sampler_test.cc"],
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":hashtablez_sampler",
":have_sse",
"//absl/base:core_headers",
"//absl/synchronization",
"//absl/synchronization:thread_pool",
"//absl/time",
"@com_google_googletest//:gtest_main",
],
)
cc_library(
name = "node_hash_policy",
hdrs = ["internal/node_hash_policy.h"],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
)
cc_test(
name = "node_hash_policy_test",
srcs = ["internal/node_hash_policy_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":hash_policy_traits",
":node_hash_policy",
"@com_google_googletest//:gtest_main",
],
)
cc_library(
name = "raw_hash_map",
hdrs = ["internal/raw_hash_map.h"],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":container_memory",
":raw_hash_set",
"//absl/base:throw_delegate",
],
)
cc_library(
name = "have_sse",
hdrs = ["internal/have_sse.h"],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
visibility = ["//visibility:private"],
)
cc_library(
name = "common",
hdrs = ["internal/common.h"],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
"//absl/meta:type_traits",
"//absl/types:optional",
],
)
cc_library(
name = "raw_hash_set",
srcs = ["internal/raw_hash_set.cc"],
hdrs = ["internal/raw_hash_set.h"],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":common",
":compressed_tuple",
":container_memory",
":hash_policy_traits",
":hashtable_debug_hooks",
":hashtablez_sampler",
":have_sse",
":layout",
"//absl/base:bits",
"//absl/base:config",
"//absl/base:core_headers",
"//absl/base:endian",
"//absl/memory",
"//absl/meta:type_traits",
"//absl/utility",
],
)
cc_test(
name = "raw_hash_set_test",
srcs = ["internal/raw_hash_set_test.cc"],
copts = ABSL_TEST_COPTS,
linkstatic = 1,
tags = NOTEST_TAGS,
deps = [
":container_memory",
":hash_function_defaults",
":hash_policy_testing",
":hashtable_debug",
":raw_hash_set",
"//absl/base",
"//absl/base:core_headers",
"//absl/strings",
"@com_google_googletest//:gtest_main",
],
)
cc_test(
name = "raw_hash_set_allocator_test",
size = "small",
srcs = ["internal/raw_hash_set_allocator_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":raw_hash_set",
":tracked",
"//absl/base:core_headers",
"@com_google_googletest//:gtest_main",
],
)
cc_library(
name = "layout",
hdrs = ["internal/layout.h"],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
"//absl/base:core_headers",
"//absl/meta:type_traits",
"//absl/strings",
"//absl/types:span",
"//absl/utility",
],
)
cc_test(
name = "layout_test",
size = "small",
srcs = ["internal/layout_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
tags = NOTEST_TAGS,
visibility = ["//visibility:private"],
deps = [
":layout",
"//absl/base",
"//absl/base:core_headers",
"//absl/types:span",
"@com_google_googletest//:gtest_main",
],
)
cc_library(
name = "tracked",
testonly = 1,
hdrs = ["internal/tracked.h"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
)
cc_library(
name = "unordered_map_constructor_test",
testonly = 1,
hdrs = ["internal/unordered_map_constructor_test.h"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":hash_generator_testing",
":hash_policy_testing",
"@com_google_googletest//:gtest",
],
)
cc_library(
name = "unordered_map_lookup_test",
testonly = 1,
hdrs = ["internal/unordered_map_lookup_test.h"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":hash_generator_testing",
":hash_policy_testing",
"@com_google_googletest//:gtest",
],
)
cc_library(
name = "unordered_map_modifiers_test",
testonly = 1,
hdrs = ["internal/unordered_map_modifiers_test.h"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":hash_generator_testing",
":hash_policy_testing",
"@com_google_googletest//:gtest",
],
)
cc_library(
name = "unordered_set_constructor_test",
testonly = 1,
hdrs = ["internal/unordered_set_constructor_test.h"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":hash_generator_testing",
":hash_policy_testing",
"//absl/meta:type_traits",
"@com_google_googletest//:gtest",
],
)
cc_library(
name = "unordered_set_members_test",
testonly = 1,
hdrs = ["internal/unordered_set_members_test.h"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
"//absl/meta:type_traits",
"@com_google_googletest//:gtest",
],
)
cc_library(
name = "unordered_map_members_test",
testonly = 1,
hdrs = ["internal/unordered_map_members_test.h"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
"//absl/meta:type_traits",
"@com_google_googletest//:gtest",
],
)
cc_library(
name = "unordered_set_lookup_test",
testonly = 1,
hdrs = ["internal/unordered_set_lookup_test.h"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":hash_generator_testing",
":hash_policy_testing",
"@com_google_googletest//:gtest",
],
)
cc_library(
name = "unordered_set_modifiers_test",
testonly = 1,
hdrs = ["internal/unordered_set_modifiers_test.h"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":hash_generator_testing",
":hash_policy_testing",
"@com_google_googletest//:gtest",
],
)
cc_test(
name = "unordered_set_test",
srcs = ["internal/unordered_set_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
tags = NOTEST_TAGS_NONMOBILE,
deps = [
":unordered_set_constructor_test",
":unordered_set_lookup_test",
":unordered_set_members_test",
":unordered_set_modifiers_test",
"@com_google_googletest//:gtest_main",
],
)
cc_test(
name = "unordered_map_test",
srcs = ["internal/unordered_map_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
tags = NOTEST_TAGS_NONMOBILE,
deps = [
":unordered_map_constructor_test",
":unordered_map_lookup_test",
":unordered_map_members_test",
":unordered_map_modifiers_test",
"@com_google_googletest//:gtest_main",
],
)

View File

@@ -0,0 +1,853 @@
#
# Copyright 2017 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is deprecated and will be removed in the future. It also doesn't do
# anything anyway. Prefer to use the library associated with the API you are
# using.
absl_cc_library(
NAME
container
PUBLIC
)
absl_cc_library(
NAME
compressed_tuple
HDRS
"internal/compressed_tuple.h"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::utility
PUBLIC
)
absl_cc_test(
NAME
compressed_tuple_test
SRCS
"internal/compressed_tuple_test.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::any
absl::compressed_tuple
absl::memory
absl::optional
absl::test_instance_tracker
absl::utility
gmock_main
)
absl_cc_library(
NAME
fixed_array
HDRS
"fixed_array.h"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::compressed_tuple
absl::algorithm
absl::core_headers
absl::dynamic_annotations
absl::throw_delegate
absl::memory
PUBLIC
)
absl_cc_test(
NAME
fixed_array_test
SRCS
"fixed_array_test.cc"
COPTS
${ABSL_TEST_COPTS}
${ABSL_EXCEPTIONS_FLAG}
LINKOPTS
${ABSL_EXCEPTIONS_FLAG_LINKOPTS}
DEPS
absl::fixed_array
absl::exception_testing
absl::hash_testing
absl::memory
gmock_main
)
absl_cc_test(
NAME
fixed_array_test_noexceptions
SRCS
"fixed_array_test.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::fixed_array
absl::exception_testing
absl::hash_testing
absl::memory
gmock_main
)
absl_cc_test(
NAME
fixed_array_exception_safety_test
SRCS
"fixed_array_exception_safety_test.cc"
COPTS
${ABSL_TEST_COPTS}
${ABSL_EXCEPTIONS_FLAG}
LINKOPTS
${ABSL_EXCEPTIONS_FLAG_LINKOPTS}
DEPS
absl::fixed_array
absl::exception_safety_testing
gmock_main
)
absl_cc_library(
NAME
inlined_vector_internal
HDRS
"internal/inlined_vector.h"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::compressed_tuple
absl::core_headers
absl::memory
absl::span
absl::type_traits
PUBLIC
)
absl_cc_library(
NAME
inlined_vector
HDRS
"inlined_vector.h"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::algorithm
absl::core_headers
absl::inlined_vector_internal
absl::throw_delegate
absl::memory
PUBLIC
)
absl_cc_library(
NAME
counting_allocator
HDRS
"internal/counting_allocator.h"
COPTS
${ABSL_DEFAULT_COPTS}
)
absl_cc_test(
NAME
inlined_vector_test
SRCS
"inlined_vector_test.cc"
COPTS
${ABSL_TEST_COPTS}
${ABSL_EXCEPTIONS_FLAG}
LINKOPTS
${ABSL_EXCEPTIONS_FLAG_LINKOPTS}
DEPS
absl::counting_allocator
absl::inlined_vector
absl::test_instance_tracker
absl::base
absl::core_headers
absl::exception_testing
absl::hash_testing
absl::memory
absl::strings
gmock_main
)
absl_cc_test(
NAME
inlined_vector_test_noexceptions
SRCS
"inlined_vector_test.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::inlined_vector
absl::test_instance_tracker
absl::base
absl::core_headers
absl::exception_testing
absl::hash_testing
absl::memory
absl::strings
gmock_main
)
absl_cc_test(
NAME
inlined_vector_exception_safety_test
SRCS
"inlined_vector_exception_safety_test.cc"
COPTS
${ABSL_TEST_COPTS}
${ABSL_EXCEPTIONS_FLAG}
LINKOPTS
${ABSL_EXCEPTIONS_FLAG_LINKOPTS}
DEPS
absl::inlined_vector
absl::exception_safety_testing
gmock_main
)
absl_cc_library(
NAME
test_instance_tracker
HDRS
"internal/test_instance_tracker.h"
SRCS
"internal/test_instance_tracker.cc"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::compare
TESTONLY
)
absl_cc_test(
NAME
test_instance_tracker_test
SRCS
"internal/test_instance_tracker_test.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::test_instance_tracker
gmock_main
)
absl_cc_library(
NAME
flat_hash_map
HDRS
"flat_hash_map.h"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::container_memory
absl::hash_function_defaults
absl::raw_hash_map
absl::algorithm_container
absl::memory
PUBLIC
)
absl_cc_test(
NAME
flat_hash_map_test
SRCS
"flat_hash_map_test.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::flat_hash_map
absl::hash_generator_testing
absl::unordered_map_constructor_test
absl::unordered_map_lookup_test
absl::unordered_map_members_test
absl::unordered_map_modifiers_test
absl::any
gmock_main
)
absl_cc_library(
NAME
flat_hash_set
HDRS
"flat_hash_set.h"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::container_memory
absl::hash_function_defaults
absl::raw_hash_set
absl::algorithm_container
absl::core_headers
absl::memory
PUBLIC
)
absl_cc_test(
NAME
flat_hash_set_test
SRCS
"flat_hash_set_test.cc"
COPTS
${ABSL_TEST_COPTS}
"-DUNORDERED_SET_CXX17"
DEPS
absl::flat_hash_set
absl::hash_generator_testing
absl::unordered_set_constructor_test
absl::unordered_set_lookup_test
absl::unordered_set_members_test
absl::unordered_set_modifiers_test
absl::memory
absl::strings
gmock_main
)
absl_cc_library(
NAME
node_hash_map
HDRS
"node_hash_map.h"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::container_memory
absl::hash_function_defaults
absl::node_hash_policy
absl::raw_hash_map
absl::algorithm_container
absl::memory
PUBLIC
)
absl_cc_test(
NAME
node_hash_map_test
SRCS
"node_hash_map_test.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::hash_generator_testing
absl::node_hash_map
absl::tracked
absl::unordered_map_constructor_test
absl::unordered_map_lookup_test
absl::unordered_map_members_test
absl::unordered_map_modifiers_test
gmock_main
)
absl_cc_library(
NAME
node_hash_set
HDRS
"node_hash_set.h"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::hash_function_defaults
absl::node_hash_policy
absl::raw_hash_set
absl::algorithm_container
absl::memory
PUBLIC
)
absl_cc_test(
NAME
node_hash_set_test
SRCS
"node_hash_set_test.cc"
COPTS
${ABSL_TEST_COPTS}
"-DUNORDERED_SET_CXX17"
DEPS
absl::hash_generator_testing
absl::node_hash_set
absl::unordered_set_constructor_test
absl::unordered_set_lookup_test
absl::unordered_set_members_test
absl::unordered_set_modifiers_test
gmock_main
)
absl_cc_library(
NAME
container_memory
HDRS
"internal/container_memory.h"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::memory
absl::utility
PUBLIC
)
absl_cc_test(
NAME
container_memory_test
SRCS
"internal/container_memory_test.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::container_memory
absl::strings
gmock_main
)
absl_cc_library(
NAME
hash_function_defaults
HDRS
"internal/hash_function_defaults.h"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::config
absl::hash
absl::strings
PUBLIC
)
absl_cc_test(
NAME
hash_function_defaults_test
SRCS
"internal/hash_function_defaults_test.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::hash_function_defaults
absl::hash
absl::strings
gmock_main
)
absl_cc_library(
NAME
hash_generator_testing
HDRS
"internal/hash_generator_testing.h"
SRCS
"internal/hash_generator_testing.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::hash_policy_testing
absl::meta
absl::strings
TESTONLY
)
absl_cc_library(
NAME
hash_policy_testing
HDRS
"internal/hash_policy_testing.h"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::hash
absl::strings
TESTONLY
)
absl_cc_test(
NAME
hash_policy_testing_test
SRCS
"internal/hash_policy_testing_test.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::hash_policy_testing
gmock_main
)
absl_cc_library(
NAME
hash_policy_traits
HDRS
"internal/hash_policy_traits.h"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::meta
PUBLIC
)
absl_cc_test(
NAME
hash_policy_traits_test
SRCS
"internal/hash_policy_traits_test.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::hash_policy_traits
gmock_main
)
absl_cc_library(
NAME
hashtablez_sampler
HDRS
"internal/hashtablez_sampler.h"
SRCS
"internal/hashtablez_sampler.cc"
"internal/hashtablez_sampler_force_weak_definition.cc"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::base
absl::have_sse
absl::synchronization
)
absl_cc_test(
NAME
hashtablez_sampler_test
SRCS
"internal/hashtablez_sampler_test.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::hashtablez_sampler
absl::have_sse
gmock_main
)
absl_cc_library(
NAME
hashtable_debug
HDRS
"internal/hashtable_debug.h"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::hashtable_debug_hooks
)
absl_cc_library(
NAME
hashtable_debug_hooks
HDRS
"internal/hashtable_debug_hooks.h"
COPTS
${ABSL_DEFAULT_COPTS}
PUBLIC
)
absl_cc_library(
NAME
have_sse
HDRS
"internal/have_sse.h"
COPTS
${ABSL_DEFAULT_COPTS}
)
absl_cc_library(
NAME
node_hash_policy
HDRS
"internal/node_hash_policy.h"
COPTS
${ABSL_DEFAULT_COPTS}
PUBLIC
)
absl_cc_test(
NAME
node_hash_policy_test
SRCS
"internal/node_hash_policy_test.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::hash_policy_traits
absl::node_hash_policy
gmock_main
)
absl_cc_library(
NAME
raw_hash_map
HDRS
"internal/raw_hash_map.h"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::container_memory
absl::raw_hash_set
absl::throw_delegate
PUBLIC
)
absl_cc_library(
NAME
container_common
HDRS
"internal/commom.h"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::type_traits
)
absl_cc_library(
NAME
raw_hash_set
HDRS
"internal/raw_hash_set.h"
SRCS
"internal/raw_hash_set.cc"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::bits
absl::compressed_tuple
absl::config
absl::container_common
absl::container_memory
absl::core_headers
absl::endian
absl::hash_policy_traits
absl::hashtable_debug_hooks
absl::have_sse
absl::layout
absl::memory
absl::meta
absl::optional
absl::utility
absl::hashtablez_sampler
PUBLIC
)
absl_cc_test(
NAME
raw_hash_set_test
SRCS
"internal/raw_hash_set_test.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::container_memory
absl::hash_function_defaults
absl::hash_policy_testing
absl::hashtable_debug
absl::raw_hash_set
absl::base
absl::core_headers
absl::strings
gmock_main
)
absl_cc_test(
NAME
raw_hash_set_allocator_test
SRCS
"internal/raw_hash_set_allocator_test.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::raw_hash_set
absl::tracked
absl::core_headers
gmock_main
)
absl_cc_library(
NAME
layout
HDRS
"internal/layout.h"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::core_headers
absl::meta
absl::strings
absl::span
absl::utility
PUBLIC
)
absl_cc_test(
NAME
layout_test
SRCS
"internal/layout_test.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::layout
absl::base
absl::core_headers
absl::span
gmock_main
)
absl_cc_library(
NAME
tracked
HDRS
"internal/tracked.h"
COPTS
${ABSL_TEST_COPTS}
TESTONLY
)
absl_cc_library(
NAME
unordered_map_constructor_test
HDRS
"internal/unordered_map_constructor_test.h"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::hash_generator_testing
absl::hash_policy_testing
gmock
TESTONLY
)
absl_cc_library(
NAME
unordered_map_lookup_test
HDRS
"internal/unordered_map_lookup_test.h"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::hash_generator_testing
absl::hash_policy_testing
gmock
TESTONLY
)
absl_cc_library(
NAME
unordered_map_members_test
HDRS
"internal/unordered_map_members_test.h"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::type_traits
gmock
TESTONLY
)
absl_cc_library(
NAME
unordered_map_modifiers_test
HDRS
"internal/unordered_map_modifiers_test.h"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::hash_generator_testing
absl::hash_policy_testing
gmock
TESTONLY
)
absl_cc_library(
NAME
unordered_set_constructor_test
HDRS
"internal/unordered_set_constructor_test.h"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::hash_generator_testing
absl::hash_policy_testing
gmock
TESTONLY
)
absl_cc_library(
NAME
unordered_set_lookup_test
HDRS
"internal/unordered_set_lookup_test.h"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::hash_generator_testing
absl::hash_policy_testing
gmock
TESTONLY
)
absl_cc_library(
NAME
unordered_set_members_test
HDRS
"internal/unordered_set_members_test.h"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::type_traits
gmock
TESTONLY
)
absl_cc_library(
NAME
unordered_set_modifiers_test
HDRS
"internal/unordered_set_modifiers_test.h"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::hash_generator_testing
absl::hash_policy_testing
gmock
TESTONLY
)
absl_cc_test(
NAME
unordered_set_test
SRCS
"internal/unordered_set_test.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::unordered_set_constructor_test
absl::unordered_set_lookup_test
absl::unordered_set_members_test
absl::unordered_set_modifiers_test
gmock_main
)
absl_cc_test(
NAME
unordered_map_test
SRCS
"internal/unordered_map_test.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::unordered_map_constructor_test
absl::unordered_map_lookup_test
absl::unordered_map_members_test
absl::unordered_map_modifiers_test
gmock_main
)

View File

@@ -0,0 +1,521 @@
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// -----------------------------------------------------------------------------
// File: fixed_array.h
// -----------------------------------------------------------------------------
//
// A `FixedArray<T>` represents a non-resizable array of `T` where the length of
// the array can be determined at run-time. It is a good replacement for
// non-standard and deprecated uses of `alloca()` and variable-length arrays,
// a GCC extension (see
// https://gcc.gnu.org/onlinedocs/gcc/Variable-Length.html).
//
// `FixedArray` allocates small arrays inline, keeping performance fast by
// avoiding heap operations. It also helps reduce the chances of
// accidentally overflowing your stack if large input is passed to
// your function.
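//
// Example (an illustrative sketch; `SumOfOnes` is a hypothetical function):
//
//   double SumOfOnes(size_t n) {
//     absl::FixedArray<double> values(n);  // size chosen at run time
//     values.fill(1.0);
//     double sum = 0;
//     for (double v : values) sum += v;
//     return sum;
//   }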
#ifndef ABSL_CONTAINER_FIXED_ARRAY_H_
#define ABSL_CONTAINER_FIXED_ARRAY_H_
#include <algorithm>
#include <array>
#include <cassert>
#include <cstddef>
#include <initializer_list>
#include <iterator>
#include <limits>
#include <memory>
#include <new>
#include <type_traits>
#include "absl/algorithm/algorithm.h"
#include "absl/base/dynamic_annotations.h"
#include "absl/base/internal/throw_delegate.h"
#include "absl/base/macros.h"
#include "absl/base/optimization.h"
#include "absl/base/port.h"
#include "absl/container/internal/compressed_tuple.h"
#include "absl/memory/memory.h"
namespace absl {
inline namespace lts_2019_08_08 {
constexpr static auto kFixedArrayUseDefault = static_cast<size_t>(-1);
// -----------------------------------------------------------------------------
// FixedArray
// -----------------------------------------------------------------------------
//
// A `FixedArray` provides a run-time fixed-size array, allocating a small array
// inline for efficiency.
//
// Most users should not specify an `inline_elements` argument and let
// `FixedArray` automatically determine the number of elements
// to store inline based on `sizeof(T)`. If `inline_elements` is specified, the
// `FixedArray` implementation will use inline storage for arrays with a
// length <= `inline_elements`.
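//
// For instance (an illustrative sketch):
//
//   absl::FixedArray<double, 8> coeffs(n);  // inline storage iff n <= 8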
//
// Note that a `FixedArray` constructed with a `size_type` argument will
// default-initialize its values by leaving trivially constructible types
// uninitialized (e.g. int, int[4], double) and default-constructing all other
// types. This matches the behavior of C-style arrays and `std::array`, but not
// `std::vector`.
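//
// For instance (an illustrative sketch):
//
//   absl::FixedArray<int> a(4);     // elements left uninitialized
//   absl::FixedArray<int> b(4, 0);  // elements initialized to 0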
//
// Note that `FixedArray` does not provide a public allocator; if it requires a
// heap allocation, it will do so with global `::operator new[]()` and
// `::operator delete[]()`, even if T provides class-scope overrides for these
// operators.
template <typename T, size_t N = kFixedArrayUseDefault,
typename A = std::allocator<T>>
class FixedArray {
static_assert(!std::is_array<T>::value || std::extent<T>::value > 0,
"Arrays with unknown bounds cannot be used with FixedArray.");
static constexpr size_t kInlineBytesDefault = 256;
using AllocatorTraits = std::allocator_traits<A>;
// std::iterator_traits isn't guaranteed to be SFINAE-friendly until C++17,
// but this seems to be mostly pedantic.
template <typename Iterator>
using EnableIfForwardIterator = absl::enable_if_t<std::is_convertible<
typename std::iterator_traits<Iterator>::iterator_category,
std::forward_iterator_tag>::value>;
static constexpr bool NoexceptCopyable() {
return std::is_nothrow_copy_constructible<StorageElement>::value &&
absl::allocator_is_nothrow<allocator_type>::value;
}
static constexpr bool NoexceptMovable() {
return std::is_nothrow_move_constructible<StorageElement>::value &&
absl::allocator_is_nothrow<allocator_type>::value;
}
static constexpr bool DefaultConstructorIsNonTrivial() {
return !absl::is_trivially_default_constructible<StorageElement>::value;
}
public:
using allocator_type = typename AllocatorTraits::allocator_type;
using value_type = typename allocator_type::value_type;
using pointer = typename allocator_type::pointer;
using const_pointer = typename allocator_type::const_pointer;
using reference = typename allocator_type::reference;
using const_reference = typename allocator_type::const_reference;
using size_type = typename allocator_type::size_type;
using difference_type = typename allocator_type::difference_type;
using iterator = pointer;
using const_iterator = const_pointer;
using reverse_iterator = std::reverse_iterator<iterator>;
using const_reverse_iterator = std::reverse_iterator<const_iterator>;
static constexpr size_type inline_elements =
(N == kFixedArrayUseDefault ? kInlineBytesDefault / sizeof(value_type)
: static_cast<size_type>(N));
FixedArray(
const FixedArray& other,
const allocator_type& a = allocator_type()) noexcept(NoexceptCopyable())
: FixedArray(other.begin(), other.end(), a) {}
FixedArray(
FixedArray&& other,
const allocator_type& a = allocator_type()) noexcept(NoexceptMovable())
: FixedArray(std::make_move_iterator(other.begin()),
std::make_move_iterator(other.end()), a) {}
// Creates an array object that can store `n` elements.
// Note that trivially constructible elements will be uninitialized.
explicit FixedArray(size_type n, const allocator_type& a = allocator_type())
: storage_(n, a) {
if (DefaultConstructorIsNonTrivial()) {
memory_internal::ConstructRange(storage_.alloc(), storage_.begin(),
storage_.end());
}
}
// Creates an array initialized with `n` copies of `val`.
FixedArray(size_type n, const value_type& val,
const allocator_type& a = allocator_type())
: storage_(n, a) {
memory_internal::ConstructRange(storage_.alloc(), storage_.begin(),
storage_.end(), val);
}
// Creates an array initialized with the size and contents of `init_list`.
FixedArray(std::initializer_list<value_type> init_list,
const allocator_type& a = allocator_type())
: FixedArray(init_list.begin(), init_list.end(), a) {}
// Creates an array initialized with the elements from the input
// range. The array's size will always be `std::distance(first, last)`.
// REQUIRES: Iterator must be a forward_iterator or better.
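// Example (an illustrative sketch):
//
//   std::vector<int> v = {2, 3, 5};
//   absl::FixedArray<int> copy(v.begin(), v.end());  // copy.size() == 3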
template <typename Iterator, EnableIfForwardIterator<Iterator>* = nullptr>
FixedArray(Iterator first, Iterator last,
const allocator_type& a = allocator_type())
: storage_(std::distance(first, last), a) {
memory_internal::CopyRange(storage_.alloc(), storage_.begin(), first, last);
}
~FixedArray() noexcept {
for (auto* cur = storage_.begin(); cur != storage_.end(); ++cur) {
AllocatorTraits::destroy(storage_.alloc(), cur);
}
}
// Assignments are deleted because they break the invariant that the size of a
// `FixedArray` never changes.
void operator=(FixedArray&&) = delete;
void operator=(const FixedArray&) = delete;
// FixedArray::size()
//
// Returns the length of the fixed array.
size_type size() const { return storage_.size(); }
// FixedArray::max_size()
//
// Returns the largest possible value of `std::distance(begin(), end())` for a
// `FixedArray<T>`. This is equivalent to the maximum number of addressable
// bytes divided by the number of bytes taken by T.
constexpr size_type max_size() const {
return (std::numeric_limits<difference_type>::max)() / sizeof(value_type);
}
// FixedArray::empty()
//
// Returns whether or not the fixed array is empty.
bool empty() const { return size() == 0; }
// FixedArray::memsize()
//
// Returns the memory size of the fixed array in bytes.
size_t memsize() const { return size() * sizeof(value_type); }
// FixedArray::data()
//
// Returns a const T* pointer to elements of the `FixedArray`. This pointer
// can be used to access (but not modify) the contained elements.
const_pointer data() const { return AsValueType(storage_.begin()); }
// Overload of FixedArray::data() to return a T* pointer to elements of the
// fixed array. This pointer can be used to access and modify the contained
// elements.
pointer data() { return AsValueType(storage_.begin()); }
// FixedArray::operator[]
//
// Returns a reference to the ith element of the fixed array.
// REQUIRES: 0 <= i < size()
reference operator[](size_type i) {
assert(i < size());
return data()[i];
}
// Overload of FixedArray::operator[] to return a const reference to the
// ith element of the fixed array.
// REQUIRES: 0 <= i < size()
const_reference operator[](size_type i) const {
assert(i < size());
return data()[i];
}
// FixedArray::at
//
// Bounds-checked access. Returns a reference to the ith element of the
// fixed array, or throws std::out_of_range.
reference at(size_type i) {
if (ABSL_PREDICT_FALSE(i >= size())) {
base_internal::ThrowStdOutOfRange("FixedArray::at failed bounds check");
}
return data()[i];
}
// Overload of FixedArray::at() to return a const reference to the ith element
// of the fixed array.
const_reference at(size_type i) const {
if (ABSL_PREDICT_FALSE(i >= size())) {
base_internal::ThrowStdOutOfRange("FixedArray::at failed bounds check");
}
return data()[i];
}
// FixedArray::front()
//
// Returns a reference to the first element of the fixed array.
reference front() { return *begin(); }
// Overload of FixedArray::front() to return a reference to the first element
// of a fixed array of const values.
const_reference front() const { return *begin(); }
// FixedArray::back()
//
// Returns a reference to the last element of the fixed array.
reference back() { return *(end() - 1); }
// Overload of FixedArray::back() to return a reference to the last element
// of a fixed array of const values.
const_reference back() const { return *(end() - 1); }
// FixedArray::begin()
//
// Returns an iterator to the beginning of the fixed array.
iterator begin() { return data(); }
// Overload of FixedArray::begin() to return a const iterator to the
// beginning of the fixed array.
const_iterator begin() const { return data(); }
// FixedArray::cbegin()
//
// Returns a const iterator to the beginning of the fixed array.
const_iterator cbegin() const { return begin(); }
// FixedArray::end()
//
// Returns an iterator to the end of the fixed array.
iterator end() { return data() + size(); }
// Overload of FixedArray::end() to return a const iterator to the end of the
// fixed array.
const_iterator end() const { return data() + size(); }
// FixedArray::cend()
//
// Returns a const iterator to the end of the fixed array.
const_iterator cend() const { return end(); }
// FixedArray::rbegin()
//
// Returns a reverse iterator from the end of the fixed array.
reverse_iterator rbegin() { return reverse_iterator(end()); }
// Overload of FixedArray::rbegin() to return a const reverse iterator from
// the end of the fixed array.
const_reverse_iterator rbegin() const {
return const_reverse_iterator(end());
}
// FixedArray::crbegin()
//
// Returns a const reverse iterator from the end of the fixed array.
const_reverse_iterator crbegin() const { return rbegin(); }
// FixedArray::rend()
//
// Returns a reverse iterator from the beginning of the fixed array.
reverse_iterator rend() { return reverse_iterator(begin()); }
// Overload of FixedArray::rend() for returning a const reverse iterator
// from the beginning of the fixed array.
const_reverse_iterator rend() const {
return const_reverse_iterator(begin());
}
// FixedArray::crend()
//
// Returns a const reverse iterator from the beginning of the fixed array.
const_reverse_iterator crend() const { return rend(); }
// FixedArray::fill()
//
// Assigns the given `value` to all elements in the fixed array.
void fill(const value_type& val) { std::fill(begin(), end(), val); }
// Relational operators. Equality operators are elementwise using
// `operator==`, while order operators order FixedArrays lexicographically.
friend bool operator==(const FixedArray& lhs, const FixedArray& rhs) {
return absl::equal(lhs.begin(), lhs.end(), rhs.begin(), rhs.end());
}
friend bool operator!=(const FixedArray& lhs, const FixedArray& rhs) {
return !(lhs == rhs);
}
friend bool operator<(const FixedArray& lhs, const FixedArray& rhs) {
return std::lexicographical_compare(lhs.begin(), lhs.end(), rhs.begin(),
rhs.end());
}
friend bool operator>(const FixedArray& lhs, const FixedArray& rhs) {
return rhs < lhs;
}
friend bool operator<=(const FixedArray& lhs, const FixedArray& rhs) {
return !(rhs < lhs);
}
friend bool operator>=(const FixedArray& lhs, const FixedArray& rhs) {
return !(lhs < rhs);
}
template <typename H>
friend H AbslHashValue(H h, const FixedArray& v) {
return H::combine(H::combine_contiguous(std::move(h), v.data(), v.size()),
v.size());
}
private:
// StorageElement
//
// For FixedArrays with a C-style-array value_type, StorageElement is a POD
// wrapper struct called StorageElementWrapper that holds the value_type
// instance inside. This is needed for construction and destruction of the
// entire array regardless of how many dimensions it has. For all other cases,
// StorageElement is just an alias of value_type.
//
// Maintainer's Note: The simpler solution would be to wrap value_type
// in a struct whether it's an array or not. That causes some paranoid
// diagnostics to misfire, believing that 'data()' returns a pointer to a
// single element, rather than the packed array that it really is.
// e.g.:
//
// FixedArray<char> buf(1);
// sprintf(buf.data(), "foo");
//
// error: call to int __builtin___sprintf_chk(etc...)
// will always overflow destination buffer [-Werror]
//
template <typename OuterT = value_type,
typename InnerT = absl::remove_extent_t<OuterT>,
size_t InnerN = std::extent<OuterT>::value>
struct StorageElementWrapper {
InnerT array[InnerN];
};
using StorageElement =
absl::conditional_t<std::is_array<value_type>::value,
StorageElementWrapper<value_type>, value_type>;
using StorageElementBuffer =
absl::aligned_storage_t<sizeof(StorageElement), alignof(StorageElement)>;
static pointer AsValueType(pointer ptr) { return ptr; }
static pointer AsValueType(StorageElementWrapper<value_type>* ptr) {
return std::addressof(ptr->array);
}
static_assert(sizeof(StorageElement) == sizeof(value_type), "");
static_assert(alignof(StorageElement) == alignof(value_type), "");
struct NonEmptyInlinedStorage {
StorageElement* data() {
return reinterpret_cast<StorageElement*>(inlined_storage_.data());
}
#ifdef ADDRESS_SANITIZER
void* RedzoneBegin() { return &redzone_begin_; }
void* RedzoneEnd() { return &redzone_end_ + 1; }
#endif // ADDRESS_SANITIZER
void AnnotateConstruct(size_type);
void AnnotateDestruct(size_type);
ADDRESS_SANITIZER_REDZONE(redzone_begin_);
std::array<StorageElementBuffer, inline_elements> inlined_storage_;
ADDRESS_SANITIZER_REDZONE(redzone_end_);
};
struct EmptyInlinedStorage {
StorageElement* data() { return nullptr; }
void AnnotateConstruct(size_type) {}
void AnnotateDestruct(size_type) {}
};
using InlinedStorage =
absl::conditional_t<inline_elements == 0, EmptyInlinedStorage,
NonEmptyInlinedStorage>;
// Storage
//
// An instance of Storage manages the inline and out-of-line memory for
// instances of FixedArray. This guarantees that even when construction of
// individual elements fails in the FixedArray constructor body, the
// destructor for Storage will still be called and out-of-line memory will be
// properly deallocated.
//
class Storage : public InlinedStorage {
public:
Storage(size_type n, const allocator_type& a)
: size_alloc_(n, a), data_(InitializeData()) {}
~Storage() noexcept {
if (UsingInlinedStorage(size())) {
InlinedStorage::AnnotateDestruct(size());
} else {
AllocatorTraits::deallocate(alloc(), AsValueType(begin()), size());
}
}
size_type size() const { return size_alloc_.template get<0>(); }
StorageElement* begin() const { return data_; }
StorageElement* end() const { return begin() + size(); }
allocator_type& alloc() {
return size_alloc_.template get<1>();
}
private:
static bool UsingInlinedStorage(size_type n) {
return n <= inline_elements;
}
StorageElement* InitializeData() {
if (UsingInlinedStorage(size())) {
InlinedStorage::AnnotateConstruct(size());
return InlinedStorage::data();
} else {
return reinterpret_cast<StorageElement*>(
AllocatorTraits::allocate(alloc(), size()));
}
}
// `CompressedTuple` takes advantage of EBCO for stateless `allocator_type`s
container_internal::CompressedTuple<size_type, allocator_type> size_alloc_;
StorageElement* data_;
};
Storage storage_;
};
template <typename T, size_t N, typename A>
constexpr size_t FixedArray<T, N, A>::kInlineBytesDefault;
template <typename T, size_t N, typename A>
constexpr typename FixedArray<T, N, A>::size_type
FixedArray<T, N, A>::inline_elements;
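// Note: the out-of-class definitions above are required prior to C++17, where
// static constexpr data members are not implicitly inline and still need a
// namespace-scope definition when odr-used.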
template <typename T, size_t N, typename A>
void FixedArray<T, N, A>::NonEmptyInlinedStorage::AnnotateConstruct(
typename FixedArray<T, N, A>::size_type n) {
#ifdef ADDRESS_SANITIZER
if (!n) return;
ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), RedzoneEnd(), data() + n);
ANNOTATE_CONTIGUOUS_CONTAINER(RedzoneBegin(), data(), data(), RedzoneBegin());
#endif // ADDRESS_SANITIZER
static_cast<void>(n); // Mark used when not in asan mode
}
template <typename T, size_t N, typename A>
void FixedArray<T, N, A>::NonEmptyInlinedStorage::AnnotateDestruct(
typename FixedArray<T, N, A>::size_type n) {
#ifdef ADDRESS_SANITIZER
if (!n) return;
ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), data() + n, RedzoneEnd());
ANNOTATE_CONTIGUOUS_CONTAINER(RedzoneBegin(), data(), RedzoneBegin(), data());
#endif // ADDRESS_SANITIZER
static_cast<void>(n); // Mark used when not in asan mode
}
} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_CONTAINER_FIXED_ARRAY_H_

View File

@@ -0,0 +1,67 @@
// Copyright 2019 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <stddef.h>
#include <string>
#include "benchmark/benchmark.h"
#include "absl/container/fixed_array.h"
namespace {
// For benchmarking -- simple class with constructor and destructor that
// set an int to a constant.
class SimpleClass {
public:
SimpleClass() : i(3) {}
~SimpleClass() { i = 0; }
private:
int i;
};
template <typename C, size_t stack_size>
void BM_FixedArray(benchmark::State& state) {
const int size = state.range(0);
for (auto _ : state) {
absl::FixedArray<C, stack_size> fa(size);
benchmark::DoNotOptimize(fa.data());
}
}
BENCHMARK_TEMPLATE(BM_FixedArray, char, absl::kFixedArrayUseDefault)
->Range(0, 1 << 16);
BENCHMARK_TEMPLATE(BM_FixedArray, char, 0)->Range(0, 1 << 16);
BENCHMARK_TEMPLATE(BM_FixedArray, char, 1)->Range(0, 1 << 16);
BENCHMARK_TEMPLATE(BM_FixedArray, char, 16)->Range(0, 1 << 16);
BENCHMARK_TEMPLATE(BM_FixedArray, char, 256)->Range(0, 1 << 16);
BENCHMARK_TEMPLATE(BM_FixedArray, char, 65536)->Range(0, 1 << 16);
BENCHMARK_TEMPLATE(BM_FixedArray, SimpleClass, absl::kFixedArrayUseDefault)
->Range(0, 1 << 16);
BENCHMARK_TEMPLATE(BM_FixedArray, SimpleClass, 0)->Range(0, 1 << 16);
BENCHMARK_TEMPLATE(BM_FixedArray, SimpleClass, 1)->Range(0, 1 << 16);
BENCHMARK_TEMPLATE(BM_FixedArray, SimpleClass, 16)->Range(0, 1 << 16);
BENCHMARK_TEMPLATE(BM_FixedArray, SimpleClass, 256)->Range(0, 1 << 16);
BENCHMARK_TEMPLATE(BM_FixedArray, SimpleClass, 65536)->Range(0, 1 << 16);
BENCHMARK_TEMPLATE(BM_FixedArray, std::string, absl::kFixedArrayUseDefault)
->Range(0, 1 << 16);
BENCHMARK_TEMPLATE(BM_FixedArray, std::string, 0)->Range(0, 1 << 16);
BENCHMARK_TEMPLATE(BM_FixedArray, std::string, 1)->Range(0, 1 << 16);
BENCHMARK_TEMPLATE(BM_FixedArray, std::string, 16)->Range(0, 1 << 16);
BENCHMARK_TEMPLATE(BM_FixedArray, std::string, 256)->Range(0, 1 << 16);
BENCHMARK_TEMPLATE(BM_FixedArray, std::string, 65536)->Range(0, 1 << 16);
} // namespace

View File

@@ -0,0 +1,118 @@
// Copyright 2019 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <initializer_list>
#include "gtest/gtest.h"
#include "absl/base/internal/exception_safety_testing.h"
#include "absl/container/fixed_array.h"
namespace absl {
inline namespace lts_2019_08_08 {
namespace {
constexpr size_t kInlined = 25;
constexpr size_t kSmallSize = kInlined / 2;
constexpr size_t kLargeSize = kInlined * 2;
constexpr int kInitialValue = 5;
constexpr int kUpdatedValue = 10;
using ::testing::TestThrowingCtor;
using Thrower = testing::ThrowingValue<testing::TypeSpec::kEverythingThrows>;
using FixedArr = absl::FixedArray<Thrower, kInlined>;
using MoveThrower = testing::ThrowingValue<testing::TypeSpec::kNoThrowMove>;
using MoveFixedArr = absl::FixedArray<MoveThrower, kInlined>;
TEST(FixedArrayExceptionSafety, CopyConstructor) {
auto small = FixedArr(kSmallSize);
TestThrowingCtor<FixedArr>(small);
auto large = FixedArr(kLargeSize);
TestThrowingCtor<FixedArr>(large);
}
TEST(FixedArrayExceptionSafety, MoveConstructor) {
TestThrowingCtor<FixedArr>(FixedArr(kSmallSize));
TestThrowingCtor<FixedArr>(FixedArr(kLargeSize));
// TypeSpec::kNoThrowMove
TestThrowingCtor<MoveFixedArr>(MoveFixedArr(kSmallSize));
TestThrowingCtor<MoveFixedArr>(MoveFixedArr(kLargeSize));
}
TEST(FixedArrayExceptionSafety, SizeConstructor) {
TestThrowingCtor<FixedArr>(kSmallSize);
TestThrowingCtor<FixedArr>(kLargeSize);
}
TEST(FixedArrayExceptionSafety, SizeValueConstructor) {
TestThrowingCtor<FixedArr>(kSmallSize, Thrower());
TestThrowingCtor<FixedArr>(kLargeSize, Thrower());
}
TEST(FixedArrayExceptionSafety, IteratorConstructor) {
auto small = FixedArr(kSmallSize);
TestThrowingCtor<FixedArr>(small.begin(), small.end());
auto large = FixedArr(kLargeSize);
TestThrowingCtor<FixedArr>(large.begin(), large.end());
}
TEST(FixedArrayExceptionSafety, InitListConstructor) {
constexpr int small_inlined = 3;
using SmallFixedArr = absl::FixedArray<Thrower, small_inlined>;
TestThrowingCtor<SmallFixedArr>(std::initializer_list<Thrower>{});
// Test inlined allocation
TestThrowingCtor<SmallFixedArr>(
std::initializer_list<Thrower>{Thrower{}, Thrower{}});
// Test out of line allocation
TestThrowingCtor<SmallFixedArr>(std::initializer_list<Thrower>{
Thrower{}, Thrower{}, Thrower{}, Thrower{}, Thrower{}});
}
testing::AssertionResult ReadMemory(FixedArr* fixed_arr) {
// Marked volatile to prevent optimization. Used for running asan tests.
volatile int sum = 0;
for (const auto& thrower : *fixed_arr) {
sum += thrower.Get();
}
return testing::AssertionSuccess() << "Values sum to [" << sum << "]";
}
TEST(FixedArrayExceptionSafety, Fill) {
auto test_fill = testing::MakeExceptionSafetyTester()
.WithContracts(ReadMemory)
.WithOperation([&](FixedArr* fixed_arr_ptr) {
auto thrower =
Thrower(kUpdatedValue, testing::nothrow_ctor);
fixed_arr_ptr->fill(thrower);
});
EXPECT_TRUE(
test_fill.WithInitialValue(FixedArr(kSmallSize, Thrower(kInitialValue)))
.Test());
EXPECT_TRUE(
test_fill.WithInitialValue(FixedArr(kLargeSize, Thrower(kInitialValue)))
.Test());
}
} // namespace
} // inline namespace lts_2019_08_08
} // namespace absl

View File

@@ -0,0 +1,883 @@
// Copyright 2019 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/container/fixed_array.h"
#include <stdio.h>
#include <cstring>
#include <list>
#include <memory>
#include <numeric>
#include <scoped_allocator>
#include <stdexcept>
#include <string>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/internal/exception_testing.h"
#include "absl/hash/hash_testing.h"
#include "absl/memory/memory.h"
using ::testing::ElementsAreArray;
namespace {
// Helper routine to determine if an absl::FixedArray used stack allocation.
template <typename ArrayType>
static bool IsOnStack(const ArrayType& a) {
return a.size() <= ArrayType::inline_elements;
}
class ConstructionTester {
public:
ConstructionTester() : self_ptr_(this), value_(0) { constructions++; }
~ConstructionTester() {
assert(self_ptr_ == this);
self_ptr_ = nullptr;
destructions++;
}
// These are incremented as elements are constructed and destructed so we can
// be sure all elements are properly cleaned up.
static int constructions;
static int destructions;
void CheckConstructed() { assert(self_ptr_ == this); }
void set(int value) { value_ = value; }
int get() { return value_; }
private:
// self_ptr_ should always point to 'this' -- that's how we can be sure the
// constructor has been called.
ConstructionTester* self_ptr_;
int value_;
};
int ConstructionTester::constructions = 0;
int ConstructionTester::destructions = 0;
// ThreeInts will initialize its three ints to the value stored in
// ThreeInts::counter. The constructor increments counter so that each object
// in an array of ThreeInts will have different values.
class ThreeInts {
public:
ThreeInts() {
x_ = counter;
y_ = counter;
z_ = counter;
++counter;
}
static int counter;
int x_, y_, z_;
};
int ThreeInts::counter = 0;
TEST(FixedArrayTest, CopyCtor) {
absl::FixedArray<int, 10> on_stack(5);
std::iota(on_stack.begin(), on_stack.end(), 0);
absl::FixedArray<int, 10> stack_copy = on_stack;
EXPECT_THAT(stack_copy, ElementsAreArray(on_stack));
EXPECT_TRUE(IsOnStack(stack_copy));
absl::FixedArray<int, 10> allocated(15);
std::iota(allocated.begin(), allocated.end(), 0);
absl::FixedArray<int, 10> alloced_copy = allocated;
EXPECT_THAT(alloced_copy, ElementsAreArray(allocated));
EXPECT_FALSE(IsOnStack(alloced_copy));
}
TEST(FixedArrayTest, MoveCtor) {
absl::FixedArray<std::unique_ptr<int>, 10> on_stack(5);
for (int i = 0; i < 5; ++i) {
on_stack[i] = absl::make_unique<int>(i);
}
absl::FixedArray<std::unique_ptr<int>, 10> stack_copy = std::move(on_stack);
for (int i = 0; i < 5; ++i) EXPECT_EQ(*(stack_copy[i]), i);
EXPECT_EQ(stack_copy.size(), on_stack.size());
absl::FixedArray<std::unique_ptr<int>, 10> allocated(15);
for (int i = 0; i < 15; ++i) {
allocated[i] = absl::make_unique<int>(i);
}
absl::FixedArray<std::unique_ptr<int>, 10> alloced_copy =
std::move(allocated);
for (int i = 0; i < 15; ++i) EXPECT_EQ(*(alloced_copy[i]), i);
EXPECT_EQ(allocated.size(), alloced_copy.size());
}
TEST(FixedArrayTest, SmallObjects) {
// Small object arrays
{
// Short arrays should be on the stack
absl::FixedArray<int> array(4);
EXPECT_TRUE(IsOnStack(array));
}
{
// Large arrays should be on the heap
absl::FixedArray<int> array(1048576);
EXPECT_FALSE(IsOnStack(array));
}
{
// Arrays of <= default size should be on the stack
absl::FixedArray<int, 100> array(100);
EXPECT_TRUE(IsOnStack(array));
}
{
// Arrays of > default size should be on the heap
absl::FixedArray<int, 100> array(101);
EXPECT_FALSE(IsOnStack(array));
}
{
// Arrays with different-sized elements should use approximately the
// same amount of stack space
absl::FixedArray<int> array1(0);
absl::FixedArray<char> array2(0);
EXPECT_LE(sizeof(array1), sizeof(array2) + 100);
EXPECT_LE(sizeof(array2), sizeof(array1) + 100);
}
{
// Ensure that vectors are properly constructed inside a fixed array.
absl::FixedArray<std::vector<int>> array(2);
EXPECT_EQ(0, array[0].size());
EXPECT_EQ(0, array[1].size());
}
{
// Regardless of absl::FixedArray implementation, check that a type with a
// low alignment requirement and a non power-of-two size is initialized
// correctly.
ThreeInts::counter = 1;
absl::FixedArray<ThreeInts> array(2);
EXPECT_EQ(1, array[0].x_);
EXPECT_EQ(1, array[0].y_);
EXPECT_EQ(1, array[0].z_);
EXPECT_EQ(2, array[1].x_);
EXPECT_EQ(2, array[1].y_);
EXPECT_EQ(2, array[1].z_);
}
}
TEST(FixedArrayTest, AtThrows) {
absl::FixedArray<int> a = {1, 2, 3};
EXPECT_EQ(a.at(2), 3);
ABSL_BASE_INTERNAL_EXPECT_FAIL(a.at(3), std::out_of_range,
"failed bounds check");
}
TEST(FixedArrayRelationalsTest, EqualArrays) {
for (int i = 0; i < 10; ++i) {
absl::FixedArray<int, 5> a1(i);
std::iota(a1.begin(), a1.end(), 0);
absl::FixedArray<int, 5> a2(a1.begin(), a1.end());
EXPECT_TRUE(a1 == a2);
EXPECT_FALSE(a1 != a2);
EXPECT_TRUE(a2 == a1);
EXPECT_FALSE(a2 != a1);
EXPECT_FALSE(a1 < a2);
EXPECT_FALSE(a1 > a2);
EXPECT_FALSE(a2 < a1);
EXPECT_FALSE(a2 > a1);
EXPECT_TRUE(a1 <= a2);
EXPECT_TRUE(a1 >= a2);
EXPECT_TRUE(a2 <= a1);
EXPECT_TRUE(a2 >= a1);
}
}
TEST(FixedArrayRelationalsTest, UnequalArrays) {
for (int i = 1; i < 10; ++i) {
absl::FixedArray<int, 5> a1(i);
std::iota(a1.begin(), a1.end(), 0);
absl::FixedArray<int, 5> a2(a1.begin(), a1.end());
--a2[i / 2];
EXPECT_FALSE(a1 == a2);
EXPECT_TRUE(a1 != a2);
EXPECT_FALSE(a2 == a1);
EXPECT_TRUE(a2 != a1);
EXPECT_FALSE(a1 < a2);
EXPECT_TRUE(a1 > a2);
EXPECT_TRUE(a2 < a1);
EXPECT_FALSE(a2 > a1);
EXPECT_FALSE(a1 <= a2);
EXPECT_TRUE(a1 >= a2);
EXPECT_TRUE(a2 <= a1);
EXPECT_FALSE(a2 >= a1);
}
}
template <int stack_elements>
static void TestArray(int n) {
SCOPED_TRACE(n);
SCOPED_TRACE(stack_elements);
ConstructionTester::constructions = 0;
ConstructionTester::destructions = 0;
{
absl::FixedArray<ConstructionTester, stack_elements> array(n);
EXPECT_THAT(array.size(), n);
EXPECT_THAT(array.memsize(), sizeof(ConstructionTester) * n);
EXPECT_THAT(array.begin() + n, array.end());
// Check that all elements were constructed
for (int i = 0; i < n; i++) {
array[i].CheckConstructed();
}
// Check that no other elements were constructed
EXPECT_THAT(ConstructionTester::constructions, n);
// Test operator[]
for (int i = 0; i < n; i++) {
array[i].set(i);
}
for (int i = 0; i < n; i++) {
EXPECT_THAT(array[i].get(), i);
EXPECT_THAT(array.data()[i].get(), i);
}
// Test data()
for (int i = 0; i < n; i++) {
array.data()[i].set(i + 1);
}
for (int i = 0; i < n; i++) {
EXPECT_THAT(array[i].get(), i + 1);
EXPECT_THAT(array.data()[i].get(), i + 1);
}
} // Close scope containing 'array'.
// Check that all constructed elements were destructed.
EXPECT_EQ(ConstructionTester::constructions,
ConstructionTester::destructions);
}
template <int elements_per_inner_array, int inline_elements>
static void TestArrayOfArrays(int n) {
SCOPED_TRACE(n);
SCOPED_TRACE(inline_elements);
SCOPED_TRACE(elements_per_inner_array);
ConstructionTester::constructions = 0;
ConstructionTester::destructions = 0;
{
using InnerArray = ConstructionTester[elements_per_inner_array];
// Heap-allocate the FixedArray to avoid blowing the stack frame.
auto array_ptr =
absl::make_unique<absl::FixedArray<InnerArray, inline_elements>>(n);
auto& array = *array_ptr;
ASSERT_EQ(array.size(), n);
ASSERT_EQ(array.memsize(),
sizeof(ConstructionTester) * elements_per_inner_array * n);
ASSERT_EQ(array.begin() + n, array.end());
// Check that all elements were constructed
for (int i = 0; i < n; i++) {
for (int j = 0; j < elements_per_inner_array; j++) {
(array[i])[j].CheckConstructed();
}
}
// Check that no other elements were constructed
ASSERT_EQ(ConstructionTester::constructions, n * elements_per_inner_array);
// Test operator[]
for (int i = 0; i < n; i++) {
for (int j = 0; j < elements_per_inner_array; j++) {
(array[i])[j].set(i * elements_per_inner_array + j);
}
}
for (int i = 0; i < n; i++) {
for (int j = 0; j < elements_per_inner_array; j++) {
ASSERT_EQ((array[i])[j].get(), i * elements_per_inner_array + j);
ASSERT_EQ((array.data()[i])[j].get(), i * elements_per_inner_array + j);
}
}
// Test data()
for (int i = 0; i < n; i++) {
for (int j = 0; j < elements_per_inner_array; j++) {
(array.data()[i])[j].set((i + 1) * elements_per_inner_array + j);
}
}
for (int i = 0; i < n; i++) {
for (int j = 0; j < elements_per_inner_array; j++) {
ASSERT_EQ((array[i])[j].get(), (i + 1) * elements_per_inner_array + j);
ASSERT_EQ((array.data()[i])[j].get(),
(i + 1) * elements_per_inner_array + j);
}
}
} // Close scope containing 'array'.
// Check that all constructed elements were destructed.
EXPECT_EQ(ConstructionTester::constructions,
ConstructionTester::destructions);
}
TEST(IteratorConstructorTest, NonInline) {
int const kInput[] = {2, 3, 5, 7, 11, 13, 17};
absl::FixedArray<int, ABSL_ARRAYSIZE(kInput) - 1> const fixed(
kInput, kInput + ABSL_ARRAYSIZE(kInput));
ASSERT_EQ(ABSL_ARRAYSIZE(kInput), fixed.size());
for (size_t i = 0; i < ABSL_ARRAYSIZE(kInput); ++i) {
ASSERT_EQ(kInput[i], fixed[i]);
}
}
TEST(IteratorConstructorTest, Inline) {
int const kInput[] = {2, 3, 5, 7, 11, 13, 17};
absl::FixedArray<int, ABSL_ARRAYSIZE(kInput)> const fixed(
kInput, kInput + ABSL_ARRAYSIZE(kInput));
ASSERT_EQ(ABSL_ARRAYSIZE(kInput), fixed.size());
for (size_t i = 0; i < ABSL_ARRAYSIZE(kInput); ++i) {
ASSERT_EQ(kInput[i], fixed[i]);
}
}
TEST(IteratorConstructorTest, NonPod) {
char const* kInput[] = {"red", "orange", "yellow", "green",
"blue", "indigo", "violet"};
absl::FixedArray<std::string> const fixed(kInput,
kInput + ABSL_ARRAYSIZE(kInput));
ASSERT_EQ(ABSL_ARRAYSIZE(kInput), fixed.size());
for (size_t i = 0; i < ABSL_ARRAYSIZE(kInput); ++i) {
ASSERT_EQ(kInput[i], fixed[i]);
}
}
TEST(IteratorConstructorTest, FromEmptyVector) {
std::vector<int> const empty;
absl::FixedArray<int> const fixed(empty.begin(), empty.end());
EXPECT_EQ(0, fixed.size());
EXPECT_EQ(empty.size(), fixed.size());
}
TEST(IteratorConstructorTest, FromNonEmptyVector) {
int const kInput[] = {2, 3, 5, 7, 11, 13, 17};
std::vector<int> const items(kInput, kInput + ABSL_ARRAYSIZE(kInput));
absl::FixedArray<int> const fixed(items.begin(), items.end());
ASSERT_EQ(items.size(), fixed.size());
for (size_t i = 0; i < items.size(); ++i) {
ASSERT_EQ(items[i], fixed[i]);
}
}
TEST(IteratorConstructorTest, FromBidirectionalIteratorRange) {
int const kInput[] = {2, 3, 5, 7, 11, 13, 17};
std::list<int> const items(kInput, kInput + ABSL_ARRAYSIZE(kInput));
absl::FixedArray<int> const fixed(items.begin(), items.end());
EXPECT_THAT(fixed, testing::ElementsAreArray(kInput));
}
TEST(InitListConstructorTest, InitListConstruction) {
absl::FixedArray<int> fixed = {1, 2, 3};
EXPECT_THAT(fixed, testing::ElementsAreArray({1, 2, 3}));
}
TEST(FillConstructorTest, NonEmptyArrays) {
absl::FixedArray<int> stack_array(4, 1);
EXPECT_THAT(stack_array, testing::ElementsAreArray({1, 1, 1, 1}));
absl::FixedArray<int, 0> heap_array(4, 1);
EXPECT_THAT(heap_array, testing::ElementsAreArray({1, 1, 1, 1}));
}
TEST(FillConstructorTest, EmptyArray) {
absl::FixedArray<int> empty_fill(0, 1);
absl::FixedArray<int> empty_size(0);
EXPECT_EQ(empty_fill, empty_size);
}
TEST(FillConstructorTest, NotTriviallyCopyable) {
std::string str = "abcd";
absl::FixedArray<std::string> strings = {str, str, str, str};
absl::FixedArray<std::string> array(4, str);
EXPECT_EQ(array, strings);
}
TEST(FillConstructorTest, Disambiguation) {
absl::FixedArray<size_t> a(1, 2);
EXPECT_THAT(a, testing::ElementsAre(2));
}
TEST(FixedArrayTest, ManySizedArrays) {
std::vector<int> sizes;
for (int i = 1; i < 100; i++) sizes.push_back(i);
for (int i = 100; i <= 1000; i += 100) sizes.push_back(i);
for (int n : sizes) {
TestArray<0>(n);
TestArray<1>(n);
TestArray<64>(n);
TestArray<1000>(n);
}
}
TEST(FixedArrayTest, ManySizedArraysOfArraysOf1) {
for (int n = 1; n < 1000; n++) {
ASSERT_NO_FATAL_FAILURE((TestArrayOfArrays<1, 0>(n)));
ASSERT_NO_FATAL_FAILURE((TestArrayOfArrays<1, 1>(n)));
ASSERT_NO_FATAL_FAILURE((TestArrayOfArrays<1, 64>(n)));
ASSERT_NO_FATAL_FAILURE((TestArrayOfArrays<1, 1000>(n)));
}
}
TEST(FixedArrayTest, ManySizedArraysOfArraysOf2) {
for (int n = 1; n < 1000; n++) {
TestArrayOfArrays<2, 0>(n);
TestArrayOfArrays<2, 1>(n);
TestArrayOfArrays<2, 64>(n);
TestArrayOfArrays<2, 1000>(n);
}
}
// If value_type is put inside a struct container,
// we might trigger this error in a hardened build unless data() is carefully
// written, so check for that.
// error: call to int __builtin___sprintf_chk(etc...)
// will always overflow destination buffer [-Werror]
TEST(FixedArrayTest, AvoidParanoidDiagnostics) {
absl::FixedArray<char, 32> buf(32);
sprintf(buf.data(), "foo"); // NOLINT(runtime/printf)
}
TEST(FixedArrayTest, TooBigInlinedSpace) {
struct TooBig {
char c[1 << 20];
}; // too big for even one on the stack
// Simulate the data members of absl::FixedArray, a pointer and a size_t.
struct Data {
TooBig* p;
size_t size;
};
// Make sure TooBig objects are not inlined for 0 or default size.
static_assert(sizeof(absl::FixedArray<TooBig, 0>) == sizeof(Data),
"0-sized absl::FixedArray should have same size as Data.");
static_assert(alignof(absl::FixedArray<TooBig, 0>) == alignof(Data),
"0-sized absl::FixedArray should have same alignment as Data.");
static_assert(sizeof(absl::FixedArray<TooBig>) == sizeof(Data),
"default-sized absl::FixedArray should have same size as Data");
static_assert(
alignof(absl::FixedArray<TooBig>) == alignof(Data),
"default-sized absl::FixedArray should have same alignment as Data.");
}
// PickyDelete EXPECTs that its class-scope deallocation functions are unused.
struct PickyDelete {
PickyDelete() {}
~PickyDelete() {}
void operator delete(void* p) {
EXPECT_TRUE(false) << __FUNCTION__;
::operator delete(p);
}
void operator delete[](void* p) {
EXPECT_TRUE(false) << __FUNCTION__;
::operator delete[](p);
}
};
TEST(FixedArrayTest, UsesGlobalAlloc) { absl::FixedArray<PickyDelete, 0> a(5); }
TEST(FixedArrayTest, Data) {
static const int kInput[] = {2, 3, 5, 7, 11, 13, 17};
absl::FixedArray<int> fa(std::begin(kInput), std::end(kInput));
EXPECT_EQ(fa.data(), &*fa.begin());
EXPECT_EQ(fa.data(), &fa[0]);
const absl::FixedArray<int>& cfa = fa;
EXPECT_EQ(cfa.data(), &*cfa.begin());
EXPECT_EQ(cfa.data(), &cfa[0]);
}
TEST(FixedArrayTest, Empty) {
absl::FixedArray<int> empty(0);
absl::FixedArray<int> inline_filled(1);
absl::FixedArray<int, 0> heap_filled(1);
EXPECT_TRUE(empty.empty());
EXPECT_FALSE(inline_filled.empty());
EXPECT_FALSE(heap_filled.empty());
}
TEST(FixedArrayTest, FrontAndBack) {
absl::FixedArray<int, 3 * sizeof(int)> inlined = {1, 2, 3};
EXPECT_EQ(inlined.front(), 1);
EXPECT_EQ(inlined.back(), 3);
absl::FixedArray<int, 0> allocated = {1, 2, 3};
EXPECT_EQ(allocated.front(), 1);
EXPECT_EQ(allocated.back(), 3);
absl::FixedArray<int> one_element = {1};
EXPECT_EQ(one_element.front(), one_element.back());
}
TEST(FixedArrayTest, ReverseIteratorInlined) {
absl::FixedArray<int, 5 * sizeof(int)> a = {0, 1, 2, 3, 4};
int counter = 5;
for (absl::FixedArray<int>::reverse_iterator iter = a.rbegin();
iter != a.rend(); ++iter) {
counter--;
EXPECT_EQ(counter, *iter);
}
EXPECT_EQ(counter, 0);
counter = 5;
for (absl::FixedArray<int>::const_reverse_iterator iter = a.rbegin();
iter != a.rend(); ++iter) {
counter--;
EXPECT_EQ(counter, *iter);
}
EXPECT_EQ(counter, 0);
counter = 5;
for (auto iter = a.crbegin(); iter != a.crend(); ++iter) {
counter--;
EXPECT_EQ(counter, *iter);
}
EXPECT_EQ(counter, 0);
}
TEST(FixedArrayTest, ReverseIteratorAllocated) {
absl::FixedArray<int, 0> a = {0, 1, 2, 3, 4};
int counter = 5;
for (absl::FixedArray<int>::reverse_iterator iter = a.rbegin();
iter != a.rend(); ++iter) {
counter--;
EXPECT_EQ(counter, *iter);
}
EXPECT_EQ(counter, 0);
counter = 5;
for (absl::FixedArray<int>::const_reverse_iterator iter = a.rbegin();
iter != a.rend(); ++iter) {
counter--;
EXPECT_EQ(counter, *iter);
}
EXPECT_EQ(counter, 0);
counter = 5;
for (auto iter = a.crbegin(); iter != a.crend(); ++iter) {
counter--;
EXPECT_EQ(counter, *iter);
}
EXPECT_EQ(counter, 0);
}
TEST(FixedArrayTest, Fill) {
absl::FixedArray<int, 5 * sizeof(int)> inlined(5);
int fill_val = 42;
inlined.fill(fill_val);
for (int i : inlined) EXPECT_EQ(i, fill_val);
absl::FixedArray<int, 0> allocated(5);
allocated.fill(fill_val);
for (int i : allocated) EXPECT_EQ(i, fill_val);
// It doesn't do anything, just make sure this compiles.
absl::FixedArray<int> empty(0);
empty.fill(fill_val);
}
// TODO(johnsoncj): Investigate InlinedStorage default initialization in GCC 4.x
#ifndef __GNUC__
TEST(FixedArrayTest, DefaultCtorDoesNotValueInit) {
using T = char;
constexpr auto capacity = 10;
using FixedArrType = absl::FixedArray<T, capacity>;
using FixedArrBuffType =
absl::aligned_storage_t<sizeof(FixedArrType), alignof(FixedArrType)>;
constexpr auto scrubbed_bits = 0x95;
constexpr auto length = capacity / 2;
FixedArrBuffType buff;
std::memset(std::addressof(buff), scrubbed_bits, sizeof(FixedArrBuffType));
FixedArrType* arr =
::new (static_cast<void*>(std::addressof(buff))) FixedArrType(length);
EXPECT_THAT(*arr, testing::Each(scrubbed_bits));
arr->~FixedArrType();
}
#endif // __GNUC__
// This is a stateful allocator, but the state lives outside of the
// allocator (in whatever test is using the allocator). This is odd
// but helps in tests where the allocator is propagated into nested
// containers - that chain of allocators uses the same state and is
// thus easier to query for aggregate allocation information.
template <typename T>
class CountingAllocator : public std::allocator<T> {
public:
using Alloc = std::allocator<T>;
using pointer = typename Alloc::pointer;
using size_type = typename Alloc::size_type;
CountingAllocator() : bytes_used_(nullptr), instance_count_(nullptr) {}
explicit CountingAllocator(int64_t* b)
: bytes_used_(b), instance_count_(nullptr) {}
CountingAllocator(int64_t* b, int64_t* a)
: bytes_used_(b), instance_count_(a) {}
template <typename U>
explicit CountingAllocator(const CountingAllocator<U>& x)
: Alloc(x),
bytes_used_(x.bytes_used_),
instance_count_(x.instance_count_) {}
pointer allocate(size_type n, const void* const hint = nullptr) {
assert(bytes_used_ != nullptr);
*bytes_used_ += n * sizeof(T);
return Alloc::allocate(n, hint);
}
void deallocate(pointer p, size_type n) {
Alloc::deallocate(p, n);
assert(bytes_used_ != nullptr);
*bytes_used_ -= n * sizeof(T);
}
template <typename... Args>
void construct(pointer p, Args&&... args) {
Alloc::construct(p, absl::forward<Args>(args)...);
if (instance_count_) {
*instance_count_ += 1;
}
}
void destroy(pointer p) {
Alloc::destroy(p);
if (instance_count_) {
*instance_count_ -= 1;
}
}
template <typename U>
class rebind {
public:
using other = CountingAllocator<U>;
};
int64_t* bytes_used_;
int64_t* instance_count_;
};
TEST(AllocatorSupportTest, CountInlineAllocations) {
constexpr size_t inlined_size = 4;
using Alloc = CountingAllocator<int>;
using AllocFxdArr = absl::FixedArray<int, inlined_size, Alloc>;
int64_t allocated = 0;
int64_t active_instances = 0;
{
const int ia[] = {0, 1, 2, 3, 4, 5, 6, 7};
Alloc alloc(&allocated, &active_instances);
AllocFxdArr arr(ia, ia + inlined_size, alloc);
static_cast<void>(arr);
}
EXPECT_EQ(allocated, 0);
EXPECT_EQ(active_instances, 0);
}
TEST(AllocatorSupportTest, CountOutoflineAllocations) {
constexpr size_t inlined_size = 4;
using Alloc = CountingAllocator<int>;
using AllocFxdArr = absl::FixedArray<int, inlined_size, Alloc>;
int64_t allocated = 0;
int64_t active_instances = 0;
{
const int ia[] = {0, 1, 2, 3, 4, 5, 6, 7};
Alloc alloc(&allocated, &active_instances);
AllocFxdArr arr(ia, ia + ABSL_ARRAYSIZE(ia), alloc);
EXPECT_EQ(allocated, arr.size() * sizeof(int));
static_cast<void>(arr);
}
EXPECT_EQ(active_instances, 0);
}
TEST(AllocatorSupportTest, CountCopyInlineAllocations) {
constexpr size_t inlined_size = 4;
using Alloc = CountingAllocator<int>;
using AllocFxdArr = absl::FixedArray<int, inlined_size, Alloc>;
int64_t allocated1 = 0;
int64_t allocated2 = 0;
int64_t active_instances = 0;
Alloc alloc(&allocated1, &active_instances);
Alloc alloc2(&allocated2, &active_instances);
{
int initial_value = 1;
AllocFxdArr arr1(inlined_size / 2, initial_value, alloc);
EXPECT_EQ(allocated1, 0);
AllocFxdArr arr2(arr1, alloc2);
EXPECT_EQ(allocated2, 0);
static_cast<void>(arr1);
static_cast<void>(arr2);
}
EXPECT_EQ(active_instances, 0);
}
TEST(AllocatorSupportTest, CountCopyOutoflineAllocations) {
constexpr size_t inlined_size = 4;
using Alloc = CountingAllocator<int>;
using AllocFxdArr = absl::FixedArray<int, inlined_size, Alloc>;
int64_t allocated1 = 0;
int64_t allocated2 = 0;
int64_t active_instances = 0;
Alloc alloc(&allocated1, &active_instances);
Alloc alloc2(&allocated2, &active_instances);
{
int initial_value = 1;
AllocFxdArr arr1(inlined_size * 2, initial_value, alloc);
EXPECT_EQ(allocated1, arr1.size() * sizeof(int));
AllocFxdArr arr2(arr1, alloc2);
EXPECT_EQ(allocated2, inlined_size * 2 * sizeof(int));
static_cast<void>(arr1);
static_cast<void>(arr2);
}
EXPECT_EQ(active_instances, 0);
}
TEST(AllocatorSupportTest, SizeValAllocConstructor) {
using testing::AllOf;
using testing::Each;
using testing::SizeIs;
constexpr size_t inlined_size = 4;
using Alloc = CountingAllocator<int>;
using AllocFxdArr = absl::FixedArray<int, inlined_size, Alloc>;
{
auto len = inlined_size / 2;
auto val = 0;
int64_t allocated = 0;
AllocFxdArr arr(len, val, Alloc(&allocated));
EXPECT_EQ(allocated, 0);
EXPECT_THAT(arr, AllOf(SizeIs(len), Each(0)));
}
{
auto len = inlined_size * 2;
auto val = 0;
int64_t allocated = 0;
AllocFxdArr arr(len, val, Alloc(&allocated));
EXPECT_EQ(allocated, len * sizeof(int));
EXPECT_THAT(arr, AllOf(SizeIs(len), Each(0)));
}
}
#ifdef ADDRESS_SANITIZER
TEST(FixedArrayTest, AddressSanitizerAnnotations1) {
absl::FixedArray<int, 32> a(10);
int* raw = a.data();
raw[0] = 0;
raw[9] = 0;
EXPECT_DEATH(raw[-2] = 0, "container-overflow");
EXPECT_DEATH(raw[-1] = 0, "container-overflow");
EXPECT_DEATH(raw[10] = 0, "container-overflow");
EXPECT_DEATH(raw[31] = 0, "container-overflow");
}
TEST(FixedArrayTest, AddressSanitizerAnnotations2) {
absl::FixedArray<char, 17> a(12);
char* raw = a.data();
raw[0] = 0;
raw[11] = 0;
EXPECT_DEATH(raw[-7] = 0, "container-overflow");
EXPECT_DEATH(raw[-1] = 0, "container-overflow");
EXPECT_DEATH(raw[12] = 0, "container-overflow");
EXPECT_DEATH(raw[17] = 0, "container-overflow");
}
TEST(FixedArrayTest, AddressSanitizerAnnotations3) {
absl::FixedArray<uint64_t, 20> a(20);
uint64_t* raw = a.data();
raw[0] = 0;
raw[19] = 0;
EXPECT_DEATH(raw[-1] = 0, "container-overflow");
EXPECT_DEATH(raw[20] = 0, "container-overflow");
}
TEST(FixedArrayTest, AddressSanitizerAnnotations4) {
absl::FixedArray<ThreeInts> a(10);
ThreeInts* raw = a.data();
raw[0] = ThreeInts();
raw[9] = ThreeInts();
// Note: raw[-1] points to 12 bytes before the container range. However,
// there is only an 8-byte red zone before the container range, so we only
// access the last 4 bytes of the struct to make sure it stays within the red
// zone.
EXPECT_DEATH(raw[-1].z_ = 0, "container-overflow");
EXPECT_DEATH(raw[10] = ThreeInts(), "container-overflow");
// The actual size of storage is kDefaultBytes=256, and element 21 begins at
// byte 21*12 = 252, still inside that storage, so writing raw[21] should
// still trigger the correct container-overflow warning.
EXPECT_DEATH(raw[21] = ThreeInts(), "container-overflow");
}
#endif // ADDRESS_SANITIZER
TEST(FixedArrayTest, AbslHashValueWorks) {
using V = absl::FixedArray<int>;
std::vector<V> cases;
// Generate a variety of vectors; some of these are small enough for the
// inline space but are stored out of line.
for (int i = 0; i < 10; ++i) {
V v(i);
for (int j = 0; j < i; ++j) {
v[j] = j;
}
cases.push_back(v);
}
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(cases));
}
} // namespace

View File

@@ -0,0 +1,587 @@
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// -----------------------------------------------------------------------------
// File: flat_hash_map.h
// -----------------------------------------------------------------------------
//
// An `absl::flat_hash_map<K, V>` is an unordered associative container of
// unique keys and associated values designed to be a more efficient replacement
// for `std::unordered_map`. Like `unordered_map`, search, insertion, and
// deletion of map elements can be done as an `O(1)` operation. However,
// `flat_hash_map` (and other unordered associative containers known as the
// collection of Abseil "Swiss tables") contain other optimizations that result
// in both memory and computation advantages.
//
// In most cases, your default choice for a hash map should be a map of type
// `flat_hash_map`.
#ifndef ABSL_CONTAINER_FLAT_HASH_MAP_H_
#define ABSL_CONTAINER_FLAT_HASH_MAP_H_
#include <cstddef>
#include <new>
#include <type_traits>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/container/internal/container_memory.h"
#include "absl/container/internal/hash_function_defaults.h" // IWYU pragma: export
#include "absl/container/internal/raw_hash_map.h" // IWYU pragma: export
#include "absl/memory/memory.h"
namespace absl {
inline namespace lts_2019_08_08 {
namespace container_internal {
template <class K, class V>
struct FlatHashMapPolicy;
} // namespace container_internal
// -----------------------------------------------------------------------------
// absl::flat_hash_map
// -----------------------------------------------------------------------------
//
// An `absl::flat_hash_map<K, V>` is an unordered associative container which
// has been optimized for both speed and memory footprint in most common use
// cases. Its interface is similar to that of `std::unordered_map<K, V>` with
// the following notable differences:
//
// * Requires keys that are CopyConstructible
// * Requires values that are MoveConstructible
// * Supports heterogeneous lookup, through `find()`, `operator[]()` and
// `insert()`, provided that the map is configured with a compatible
// heterogeneous hashing function and equality operator (see the lookup
// sketch after the usage example below).
// * Invalidates any references and pointers to elements within the table after
// `rehash()`.
// * Contains a `capacity()` member function indicating the number of element
// slots (open, deleted, and empty) within the hash map.
// * Returns `void` from the `erase(iterator)` overload.
//
// By default, `flat_hash_map` uses the `absl::Hash` hashing framework.
// All fundamental and Abseil types that support the `absl::Hash` framework have
// a compatible equality operator for comparing insertions into `flat_hash_map`.
// If your type is not yet supported by the `absl::Hash` framework, see
// absl/hash/hash.h for information on extending Abseil hashing to user-defined
// types.
//
// NOTE: A `flat_hash_map` stores its value types directly inside its
// implementation array to avoid memory indirection. Because a `flat_hash_map`
// is designed to move data when rehashed, map values will not retain pointer
// stability. If you require pointer stability, or if your values are large,
// consider using `absl::flat_hash_map<Key, std::unique_ptr<Value>>` instead.
// If your types are not moveable or you require pointer stability for keys,
// consider `absl::node_hash_map`.
//
// Example:
//
// // Create a flat hash map of three strings (that map to strings)
// absl::flat_hash_map<std::string, std::string> ducks =
// {{"a", "huey"}, {"b", "dewey"}, {"c", "louie"}};
//
// // Insert a new element into the flat hash map
// ducks.insert({"d", "donald"});
//
// // Force a rehash of the flat hash map
// ducks.rehash(0);
//
// // Find the element with the key "b"
// std::string search_key = "b";
// auto result = ducks.find(search_key);
// if (result != ducks.end()) {
// std::cout << "Result: " << result->second << std::endl;
// }
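//
// Example of heterogeneous lookup (an illustrative sketch, not part of the
// upstream comments; it assumes the default hash and equality functors for
// `std::string`, which are transparent and accept `absl::string_view`):
//
//   absl::string_view search_view = "b";
//   auto view_result = ducks.find(search_view);  // No temporary std::string.
//   if (view_result != ducks.end()) {
//     std::cout << "Result: " << view_result->second << std::endl;
//   }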
template <class K, class V,
class Hash = absl::container_internal::hash_default_hash<K>,
class Eq = absl::container_internal::hash_default_eq<K>,
class Allocator = std::allocator<std::pair<const K, V>>>
class flat_hash_map : public absl::container_internal::raw_hash_map<
absl::container_internal::FlatHashMapPolicy<K, V>,
Hash, Eq, Allocator> {
using Base = typename flat_hash_map::raw_hash_map;
public:
// Constructors and Assignment Operators
//
// A flat_hash_map supports the same overload set as `std::unordered_map`
// for construction and assignment:
//
// * Default constructor
//
// // No allocation for the table's elements is made.
// absl::flat_hash_map<int, std::string> map1;
//
// * Initializer List constructor
//
// absl::flat_hash_map<int, std::string> map2 =
// {{1, "huey"}, {2, "dewey"}, {3, "louie"},};
//
// * Copy constructor
//
// absl::flat_hash_map<int, std::string> map3(map2);
//
// * Copy assignment operator
//
// // Hash functor and Comparator are copied as well
// absl::flat_hash_map<int, std::string> map4;
// map4 = map3;
//
// * Move constructor
//
// // Move is guaranteed efficient
// absl::flat_hash_map<int, std::string> map5(std::move(map4));
//
// * Move assignment operator
//
// // May be efficient if allocators are compatible
// absl::flat_hash_map<int, std::string> map6;
// map6 = std::move(map5);
//
// * Range constructor
//
// std::vector<std::pair<int, std::string>> v = {{1, "a"}, {2, "b"}};
// absl::flat_hash_map<int, std::string> map7(v.begin(), v.end());
flat_hash_map() {}
using Base::Base;
// flat_hash_map::begin()
//
// Returns an iterator to the beginning of the `flat_hash_map`.
using Base::begin;
// flat_hash_map::cbegin()
//
// Returns a const iterator to the beginning of the `flat_hash_map`.
using Base::cbegin;
// flat_hash_map::cend()
//
// Returns a const iterator to the end of the `flat_hash_map`.
using Base::cend;
// flat_hash_map::end()
//
// Returns an iterator to the end of the `flat_hash_map`.
using Base::end;
// flat_hash_map::capacity()
//
// Returns the number of element slots (assigned, deleted, and empty)
// available within the `flat_hash_map`.
//
// NOTE: this member function is particular to `absl::flat_hash_map` and is
// not provided in the `std::unordered_map` API.
using Base::capacity;
// flat_hash_map::empty()
//
// Returns whether or not the `flat_hash_map` is empty.
using Base::empty;
// flat_hash_map::max_size()
//
// Returns the largest theoretically possible number of elements within a
// `flat_hash_map` under current memory constraints. This value can be thought
// of as the largest value of `std::distance(begin(), end())` for a
// `flat_hash_map<K, V>`.
using Base::max_size;
// flat_hash_map::size()
//
// Returns the number of elements currently within the `flat_hash_map`.
using Base::size;
// flat_hash_map::clear()
//
// Removes all elements from the `flat_hash_map`. Invalidates any references,
// pointers, or iterators referring to contained elements.
//
// NOTE: this operation may shrink the underlying buffer. To avoid shrinking
// the underlying buffer call `erase(begin(), end())`.
using Base::clear;
// flat_hash_map::erase()
//
// Erases elements within the `flat_hash_map`. Erasing does not trigger a
// rehash. Overloads are listed below.
//
// void erase(const_iterator pos):
//
// Erases the element at `position` of the `flat_hash_map`, returning
// `void`.
//
// NOTE: returning `void` in this case is different than that of STL
// containers in general and `std::unordered_map` in particular (which
// return an iterator to the element following the erased element). If that
// iterator is needed, simply post-increment the iterator:
//
// map.erase(it++);
//
// iterator erase(const_iterator first, const_iterator last):
//
// Erases the elements in the open interval [`first`, `last`), returning an
// iterator pointing to `last`.
//
// size_type erase(const key_type& key):
//
// Erases the element with the matching key, if it exists.
using Base::erase;
// flat_hash_map::insert()
//
// Inserts an element of the specified value into the `flat_hash_map`,
// returning an iterator pointing to the newly inserted element, provided that
// an element with the given key does not already exist. If rehashing occurs
// due to the insertion, all iterators are invalidated. Overloads are listed
// below.
//
// std::pair<iterator,bool> insert(const init_type& value):
//
// Inserts a value into the `flat_hash_map`. Returns a pair consisting of an
// iterator to the inserted element (or to the element that prevented the
// insertion) and a bool denoting whether the insertion took place.
//
// std::pair<iterator,bool> insert(T&& value):
// std::pair<iterator,bool> insert(init_type&& value):
//
// Inserts a moveable value into the `flat_hash_map`. Returns a pair
// consisting of an iterator to the inserted element (or to the element that
// prevented the insertion) and a bool denoting whether the insertion took
// place.
//
// iterator insert(const_iterator hint, const init_type& value):
// iterator insert(const_iterator hint, T&& value):
// iterator insert(const_iterator hint, init_type&& value);
//
// Inserts a value, using the position of `hint` as a non-binding suggestion
// for where to begin the insertion search. Returns an iterator to the
// inserted element, or to the existing element that prevented the
// insertion.
//
// void insert(InputIterator first, InputIterator last):
//
// Inserts a range of values [`first`, `last`).
//
// NOTE: Although the STL does not specify which element may be inserted if
// multiple keys compare equivalently, for `flat_hash_map` we guarantee the
// first match is inserted.
//
// void insert(std::initializer_list<init_type> ilist):
//
// Inserts the elements within the initializer list `ilist`.
//
// NOTE: Although the STL does not specify which element may be inserted if
// multiple keys compare equivalently within the initializer list, for
// `flat_hash_map` we guarantee the first match is inserted.
using Base::insert;
// flat_hash_map::insert_or_assign()
//
// Inserts an element of the specified value into the `flat_hash_map` provided
// that a value with the given key does not already exist, or replaces it with
// the element value if a key for that value already exists, returning an
// iterator pointing to the newly inserted element. If rehashing occurs due
// to the insertion, all existing iterators are invalidated. Overloads are
// listed below.
//
// pair<iterator, bool> insert_or_assign(const init_type& k, T&& obj):
// pair<iterator, bool> insert_or_assign(init_type&& k, T&& obj):
//
// Inserts/Assigns (or moves) the element of the specified key into the
// `flat_hash_map`.
//
// iterator insert_or_assign(const_iterator hint,
// const init_type& k, T&& obj):
// iterator insert_or_assign(const_iterator hint, init_type&& k, T&& obj):
//
// Inserts/Assigns (or moves) the element of the specified key into the
// `flat_hash_map` using the position of `hint` as a non-binding suggestion
// for where to begin the insertion search.
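//
// Example (an illustrative sketch, not part of the upstream comments):
//
//   absl::flat_hash_map<std::string, int> m;
//   m.insert_or_assign("a", 1);  // Inserts {"a", 1}.
//   m.insert_or_assign("a", 2);  // "a" already exists; assigns 2 to its value.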
using Base::insert_or_assign;
// flat_hash_map::emplace()
//
// Inserts an element of the specified value by constructing it in-place
// within the `flat_hash_map`, provided that no element with the given key
// already exists.
//
// The element may be constructed even if there already is an element with the
// key in the container, in which case the newly constructed element will be
// destroyed immediately. Prefer `try_emplace()` unless your key is not
// copyable or moveable.
//
// If rehashing occurs due to the insertion, all iterators are invalidated.
using Base::emplace;
// flat_hash_map::emplace_hint()
//
// Inserts an element of the specified value by constructing it in-place
// within the `flat_hash_map`, using the position of `hint` as a non-binding
// suggestion for where to begin the insertion search, and only inserts
// provided that no element with the given key already exists.
//
// The element may be constructed even if there already is an element with the
// key in the container, in which case the newly constructed element will be
// destroyed immediately. Prefer `try_emplace()` unless your key is not
// copyable or moveable.
//
// If rehashing occurs due to the insertion, all iterators are invalidated.
using Base::emplace_hint;
// flat_hash_map::try_emplace()
//
// Inserts an element of the specified value by constructing it in-place
// within the `flat_hash_map`, provided that no element with the given key
// already exists. Unlike `emplace()`, if an element with the given key
// already exists, we guarantee that no element is constructed.
//
// If rehashing occurs due to the insertion, all iterators are invalidated.
// Overloads are listed below.
//
// pair<iterator, bool> try_emplace(const key_type& k, Args&&... args):
// pair<iterator, bool> try_emplace(key_type&& k, Args&&... args):
//
// Inserts (via copy or move) the element of the specified key into the
// `flat_hash_map`.
//
// iterator try_emplace(const_iterator hint,
// const init_type& k, Args&&... args):
// iterator try_emplace(const_iterator hint, init_type&& k, Args&&... args):
//
// Inserts (via copy or move) the element of the specified key into the
// `flat_hash_map` using the position of `hint` as a non-binding suggestion
// for where to begin the insertion search.
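//
// Example (an illustrative sketch, not part of the upstream comments):
//
//   absl::flat_hash_map<std::string, std::string> m;
//   m.try_emplace("a", "huey");   // Inserts {"a", "huey"}.
//   m.try_emplace("a", "dewey");  // "a" exists; no element is constructed.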
using Base::try_emplace;
// flat_hash_map::extract()
//
// Extracts the indicated element, erasing it in the process, and returns it
// as a C++17-compatible node handle. Overloads are listed below.
//
// node_type extract(const_iterator position):
//
// Extracts the key,value pair of the element at the indicated position and
// returns a node handle owning that extracted data.
//
// node_type extract(const key_type& x):
//
// Extracts the key,value pair of the element with a key matching the passed
// key value and returns a node handle owning that extracted data. If the
// `flat_hash_map` does not contain an element with a matching key, this
// function returns an empty node handle.
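//
// Example (an illustrative sketch, not part of the upstream comments; it
// mirrors the MergeExtractInsert test in flat_hash_map_test.cc):
//
//   absl::flat_hash_map<int, int> m = {{1, 7}, {2, 9}};
//   auto node = m.extract(1);   // `m` now holds only {2, 9}.
//   node.mapped() = 17;         // Mutate the extracted value.
//   m.insert(std::move(node));  // Re-insert as {1, 17} without copying.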
using Base::extract;
// flat_hash_map::merge()
//
// Extracts elements from a given `source` flat hash map into this
// `flat_hash_map`. If the destination `flat_hash_map` already contains an
// element with an equivalent key, that element is not extracted.
using Base::merge;
// flat_hash_map::swap(flat_hash_map& other)
//
// Exchanges the contents of this `flat_hash_map` with those of the `other`
// flat hash map, avoiding invocation of any move, copy, or swap operations on
// individual elements.
//
// All iterators and references on the `flat_hash_map` remain valid, excepting
// for the past-the-end iterator, which is invalidated.
//
// `swap()` requires that the flat hash map's hashing and key equivalence
// functions be Swappable, and are exchanged using unqualified calls to
// non-member `swap()`. If the map's allocator has
// `std::allocator_traits<allocator_type>::propagate_on_container_swap::value`
// set to `true`, the allocators are also exchanged using an unqualified call
// to non-member `swap()`; otherwise, the allocators are not swapped.
using Base::swap;
// flat_hash_map::rehash(count)
//
// Rehashes the `flat_hash_map`, setting the number of slots to be at least
// the passed value. If the new number of slots increases the load factor more
// than the current maximum load factor
// (`count` < `size()` / `max_load_factor()`), then the new number of slots
// will be at least `size()` / `max_load_factor()`.
//
// To force a rehash, pass rehash(0).
//
// NOTE: unlike behavior in `std::unordered_map`, references are also
// invalidated upon a `rehash()`.
using Base::rehash;
// flat_hash_map::reserve(count)
//
// Sets the number of slots in the `flat_hash_map` to the number needed to
// accommodate at least `count` total elements without exceeding the current
// maximum load factor, and may rehash the container if needed.
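//
// Example (an illustrative sketch, not part of the upstream comments):
//
//   absl::flat_hash_map<int, int> m;
//   m.reserve(1000);  // Inserting up to 1000 total elements should not
//                     // trigger a rehash.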
using Base::reserve;
// flat_hash_map::at()
//
// Returns a reference to the mapped value of the element with key equivalent
// to the passed key.
using Base::at;
// flat_hash_map::contains()
//
// Determines whether an element with a key comparing equal to the given `key`
// exists within the `flat_hash_map`, returning `true` if so or `false`
// otherwise.
using Base::contains;
// flat_hash_map::count(const Key& key) const
//
// Returns the number of elements with a key comparing equal to the given
// `key` within the `flat_hash_map`. Note that this function will return
// either `1` or `0` since duplicate keys are not allowed within a
// `flat_hash_map`.
using Base::count;
// flat_hash_map::equal_range()
//
// Returns a closed range [first, last], defined by a `std::pair` of two
// iterators, containing all elements with the passed key in the
// `flat_hash_map`.
using Base::equal_range;
// flat_hash_map::find()
//
// Finds an element with the passed `key` within the `flat_hash_map`.
using Base::find;
// flat_hash_map::operator[]()
//
// Returns a reference to the value mapped to the passed key within the
// `flat_hash_map`, performing an `insert()` if the key does not already
// exist.
//
// If an insertion occurs and results in a rehashing of the container, all
// iterators are invalidated. Otherwise iterators are not affected and
// references are not invalidated. Overloads are listed below.
//
// T& operator[](const Key& key):
//
// Inserts an init_type object constructed in-place if the element with the
// given key does not exist.
//
// T& operator[](Key&& key):
//
// Inserts an init_type object constructed in-place provided that an element
// with the given key does not exist.
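//
// Example (an illustrative sketch, not part of the upstream comments):
//
//   absl::flat_hash_map<std::string, int> m;
//   m["a"] += 1;  // "a" is absent: an int is value-initialized to 0,
//                 // then incremented.
//   m["a"] += 1;  // "a" is present: no insertion occurs; m["a"] == 2 now.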
using Base::operator[];
// flat_hash_map::bucket_count()
//
// Returns the number of "buckets" within the `flat_hash_map`. Note that
// because a flat hash map contains all elements within its internal storage,
// this value simply equals the current capacity of the `flat_hash_map`.
using Base::bucket_count;
// flat_hash_map::load_factor()
//
// Returns the current load factor of the `flat_hash_map` (the average number
// of slots occupied with a value within the hash map).
using Base::load_factor;
// flat_hash_map::max_load_factor()
//
// Manages the maximum load factor of the `flat_hash_map`. Overloads are
// listed below.
//
// float flat_hash_map::max_load_factor()
//
// Returns the current maximum load factor of the `flat_hash_map`.
//
// void flat_hash_map::max_load_factor(float ml)
//
// Sets the maximum load factor of the `flat_hash_map` to the passed value.
//
// NOTE: This overload is provided only for API compatibility with the STL;
// `flat_hash_map` will ignore any set load factor and manage its rehashing
// internally as an implementation detail.
using Base::max_load_factor;
// flat_hash_map::get_allocator()
//
// Returns the allocator function associated with this `flat_hash_map`.
using Base::get_allocator;
// flat_hash_map::hash_function()
//
// Returns the hashing function used to hash the keys within this
// `flat_hash_map`.
using Base::hash_function;
// flat_hash_map::key_eq()
//
// Returns the function used to compare keys for equality.
using Base::key_eq;
};
namespace container_internal {
template <class K, class V>
struct FlatHashMapPolicy {
using slot_policy = container_internal::map_slot_policy<K, V>;
using slot_type = typename slot_policy::slot_type;
using key_type = K;
using mapped_type = V;
using init_type = std::pair</*non const*/ key_type, mapped_type>;
template <class Allocator, class... Args>
static void construct(Allocator* alloc, slot_type* slot, Args&&... args) {
slot_policy::construct(alloc, slot, std::forward<Args>(args)...);
}
template <class Allocator>
static void destroy(Allocator* alloc, slot_type* slot) {
slot_policy::destroy(alloc, slot);
}
template <class Allocator>
static void transfer(Allocator* alloc, slot_type* new_slot,
slot_type* old_slot) {
slot_policy::transfer(alloc, new_slot, old_slot);
}
template <class F, class... Args>
static decltype(absl::container_internal::DecomposePair(
std::declval<F>(), std::declval<Args>()...))
apply(F&& f, Args&&... args) {
return absl::container_internal::DecomposePair(std::forward<F>(f),
std::forward<Args>(args)...);
}
static size_t space_used(const slot_type*) { return 0; }
static std::pair<const K, V>& element(slot_type* slot) { return slot->value; }
static V& value(std::pair<const K, V>* kv) { return kv->second; }
static const V& value(const std::pair<const K, V>* kv) { return kv->second; }
};
} // namespace container_internal
namespace container_algorithm_internal {
// Specialization of trait in absl/algorithm/container.h
template <class Key, class T, class Hash, class KeyEqual, class Allocator>
struct IsUnorderedContainer<
absl::flat_hash_map<Key, T, Hash, KeyEqual, Allocator>> : std::true_type {};
} // namespace container_algorithm_internal
} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_CONTAINER_FLAT_HASH_MAP_H_

View File

@@ -0,0 +1,249 @@
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/container/flat_hash_map.h"
#include "absl/container/internal/hash_generator_testing.h"
#include "absl/container/internal/unordered_map_constructor_test.h"
#include "absl/container/internal/unordered_map_lookup_test.h"
#include "absl/container/internal/unordered_map_members_test.h"
#include "absl/container/internal/unordered_map_modifiers_test.h"
#include "absl/types/any.h"
namespace absl {
inline namespace lts_2019_08_08 {
namespace container_internal {
namespace {
using ::absl::container_internal::hash_internal::Enum;
using ::absl::container_internal::hash_internal::EnumClass;
using ::testing::_;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
template <class K, class V>
using Map = flat_hash_map<K, V, StatefulTestingHash, StatefulTestingEqual,
Alloc<std::pair<const K, V>>>;
static_assert(!std::is_standard_layout<NonStandardLayout>(), "");
using MapTypes =
::testing::Types<Map<int, int>, Map<std::string, int>,
Map<Enum, std::string>, Map<EnumClass, int>,
Map<int, NonStandardLayout>, Map<NonStandardLayout, int>>;
INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashMap, ConstructorTest, MapTypes);
INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashMap, LookupTest, MapTypes);
INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashMap, MembersTest, MapTypes);
INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashMap, ModifiersTest, MapTypes);
TEST(FlatHashMap, StandardLayout) {
struct Int {
explicit Int(size_t value) : value(value) {}
Int() : value(0) { ADD_FAILURE(); }
Int(const Int& other) : value(other.value) { ADD_FAILURE(); }
Int(Int&&) = default;
bool operator==(const Int& other) const { return value == other.value; }
size_t value;
};
static_assert(std::is_standard_layout<Int>(), "");
struct Hash {
size_t operator()(const Int& obj) const { return obj.value; }
};
// Verify that neither the key nor the value get default-constructed or
// copy-constructed.
{
flat_hash_map<Int, Int, Hash> m;
m.try_emplace(Int(1), Int(2));
m.try_emplace(Int(3), Int(4));
m.erase(Int(1));
m.rehash(2 * m.bucket_count());
}
{
flat_hash_map<Int, Int, Hash> m;
m.try_emplace(Int(1), Int(2));
m.try_emplace(Int(3), Int(4));
m.erase(Int(1));
m.clear();
}
}
// gcc becomes unhappy if this is inside the method, so pull it out here.
struct ballast {};
TEST(FlatHashMap, IteratesMsan) {
// Because SwissTable randomizes on pointer addresses, we keep old tables
// around to ensure we don't reuse old memory.
std::vector<absl::flat_hash_map<int, ballast>> garbage;
for (int i = 0; i < 100; ++i) {
absl::flat_hash_map<int, ballast> t;
for (int j = 0; j < 100; ++j) {
t[j];
for (const auto& p : t) EXPECT_THAT(p, Pair(_, _));
}
garbage.push_back(std::move(t));
}
}
// Demonstration of the "Lazy Key" pattern. This uses heterogeneous insert to
// avoid creating expensive key elements when the item is already present in the
// map.
struct LazyInt {
explicit LazyInt(size_t value, int* tracker)
: value(value), tracker(tracker) {}
explicit operator size_t() const {
++*tracker;
return value;
}
size_t value;
int* tracker;
};
struct Hash {
using is_transparent = void;
int* tracker;
size_t operator()(size_t obj) const {
++*tracker;
return obj;
}
size_t operator()(const LazyInt& obj) const {
++*tracker;
return obj.value;
}
};
struct Eq {
using is_transparent = void;
bool operator()(size_t lhs, size_t rhs) const {
return lhs == rhs;
}
bool operator()(size_t lhs, const LazyInt& rhs) const {
return lhs == rhs.value;
}
};
TEST(FlatHashMap, LazyKeyPattern) {
// Hash counts are only guaranteed in opt mode; we use assertions to track
// internal state that can cause extra calls to hash.
int conversions = 0;
int hashes = 0;
flat_hash_map<size_t, size_t, Hash, Eq> m(0, Hash{&hashes});
m.reserve(3);
m[LazyInt(1, &conversions)] = 1;
EXPECT_THAT(m, UnorderedElementsAre(Pair(1, 1)));
EXPECT_EQ(conversions, 1);
#ifdef NDEBUG
EXPECT_EQ(hashes, 1);
#endif
m[LazyInt(1, &conversions)] = 2;
EXPECT_THAT(m, UnorderedElementsAre(Pair(1, 2)));
EXPECT_EQ(conversions, 1);
#ifdef NDEBUG
EXPECT_EQ(hashes, 2);
#endif
m.try_emplace(LazyInt(2, &conversions), 3);
EXPECT_THAT(m, UnorderedElementsAre(Pair(1, 2), Pair(2, 3)));
EXPECT_EQ(conversions, 2);
#ifdef NDEBUG
EXPECT_EQ(hashes, 3);
#endif
m.try_emplace(LazyInt(2, &conversions), 4);
EXPECT_THAT(m, UnorderedElementsAre(Pair(1, 2), Pair(2, 3)));
EXPECT_EQ(conversions, 2);
#ifdef NDEBUG
EXPECT_EQ(hashes, 4);
#endif
}
TEST(FlatHashMap, BitfieldArgument) {
union {
int n : 1;
};
n = 0;
flat_hash_map<int, int> m;
m.erase(n);
m.count(n);
m.prefetch(n);
m.find(n);
m.contains(n);
m.equal_range(n);
m.insert_or_assign(n, n);
m.insert_or_assign(m.end(), n, n);
m.try_emplace(n);
m.try_emplace(m.end(), n);
m.at(n);
m[n];
}
TEST(FlatHashMap, MergeExtractInsert) {
// We can't test mutable keys or non-copyable keys with flat_hash_map.
// Test that the nodes have the proper API.
absl::flat_hash_map<int, int> m = {{1, 7}, {2, 9}};
auto node = m.extract(1);
EXPECT_TRUE(node);
EXPECT_EQ(node.key(), 1);
EXPECT_EQ(node.mapped(), 7);
EXPECT_THAT(m, UnorderedElementsAre(Pair(2, 9)));
node.mapped() = 17;
m.insert(std::move(node));
EXPECT_THAT(m, UnorderedElementsAre(Pair(1, 17), Pair(2, 9)));
}
#if (defined(ABSL_HAVE_STD_ANY) || !defined(_LIBCPP_VERSION)) && \
!defined(__EMSCRIPTEN__)
TEST(FlatHashMap, Any) {
absl::flat_hash_map<int, absl::any> m;
m.emplace(1, 7);
auto it = m.find(1);
ASSERT_NE(it, m.end());
EXPECT_EQ(7, absl::any_cast<int>(it->second));
m.emplace(std::piecewise_construct, std::make_tuple(2), std::make_tuple(8));
it = m.find(2);
ASSERT_NE(it, m.end());
EXPECT_EQ(8, absl::any_cast<int>(it->second));
m.emplace(std::piecewise_construct, std::make_tuple(3),
std::make_tuple(absl::any(9)));
it = m.find(3);
ASSERT_NE(it, m.end());
EXPECT_EQ(9, absl::any_cast<int>(it->second));
struct H {
size_t operator()(const absl::any&) const { return 0; }
};
struct E {
bool operator()(const absl::any&, const absl::any&) const { return true; }
};
absl::flat_hash_map<absl::any, int, H, E> m2;
m2.emplace(1, 7);
auto it2 = m2.find(1);
ASSERT_NE(it2, m2.end());
EXPECT_EQ(7, it2->second);
}
#endif // (defined(ABSL_HAVE_STD_ANY) || !defined(_LIBCPP_VERSION)) &&
// !defined(__EMSCRIPTEN__)
} // namespace
} // namespace container_internal
} // inline namespace lts_2019_08_08
} // namespace absl

View File

@@ -0,0 +1,495 @@
// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// -----------------------------------------------------------------------------
// File: flat_hash_set.h
// -----------------------------------------------------------------------------
//
// An `absl::flat_hash_set<T>` is an unordered associative container designed to
// be a more efficient replacement for `std::unordered_set`. Like
// `unordered_set`, search, insertion, and deletion of set elements can be done
// as an `O(1)` operation. However, `flat_hash_set` (and other unordered
// associative containers known as the collection of Abseil "Swiss tables")
// contain other optimizations that result in both memory and computation
// advantages.
//
// In most cases, your default choice for a hash set should be a set of type
// `flat_hash_set`.
#ifndef ABSL_CONTAINER_FLAT_HASH_SET_H_
#define ABSL_CONTAINER_FLAT_HASH_SET_H_
#include <type_traits>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/base/macros.h"
#include "absl/container/internal/container_memory.h"
#include "absl/container/internal/hash_function_defaults.h" // IWYU pragma: export
#include "absl/container/internal/raw_hash_set.h" // IWYU pragma: export
#include "absl/memory/memory.h"
namespace absl {
inline namespace lts_2019_08_08 {
namespace container_internal {
template <typename T>
struct FlatHashSetPolicy;
} // namespace container_internal
// -----------------------------------------------------------------------------
// absl::flat_hash_set
// -----------------------------------------------------------------------------
//
// An `absl::flat_hash_set<T>` is an unordered associative container which has
// been optimized for both speed and memory footprint in most common use cases.
// Its interface is similar to that of `std::unordered_set<T>` with the
// following notable differences:
//
// * Requires keys that are CopyConstructible
// * Supports heterogeneous lookup, through `find()` and `insert()`, provided
// that the set is configured with a compatible heterogeneous hashing function
// and equality operator (see the lookup sketch after the usage example below).
// * Invalidates any references and pointers to elements within the table after
// `rehash()`.
// * Contains a `capacity()` member function indicating the number of element
// slots (open, deleted, and empty) within the hash set.
// * Returns `void` from the `erase(iterator)` overload.
//
// By default, `flat_hash_set` uses the `absl::Hash` hashing framework. All
// fundamental and Abseil types that support the `absl::Hash` framework have a
// compatible equality operator for comparing insertions into `flat_hash_set`.
// If your type is not yet supported by the `absl::Hash` framework, see
// absl/hash/hash.h for information on extending Abseil hashing to user-defined
// types.
//
// NOTE: A `flat_hash_set` stores its keys directly inside its implementation
// array to avoid memory indirection. Because a `flat_hash_set` is designed to
// move data when rehashed, set keys will not retain pointer stability. If you
// require pointer stability, consider using
// `absl::flat_hash_set<std::unique_ptr<T>>`. If your type is not moveable and
// you require pointer stability, consider `absl::node_hash_set` instead.
//
// Example:
//
// // Create a flat hash set of three strings
// absl::flat_hash_set<std::string> ducks =
// {"huey", "dewey", "louie"};
//
// // Insert a new element into the flat hash set
// ducks.insert("donald");
//
// // Force a rehash of the flat hash set
// ducks.rehash(0);
//
// // See if "dewey" is present
// if (ducks.contains("dewey")) {
// std::cout << "We found dewey!" << std::endl;
// }
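//
// Example of heterogeneous lookup (an illustrative sketch, not part of the
// upstream comments; it assumes the default hash and equality functors for
// `std::string`, which are transparent and accept `absl::string_view`):
//
//   absl::string_view name = "dewey";
//   if (ducks.contains(name)) {  // No temporary std::string is constructed.
//     std::cout << "We found dewey!" << std::endl;
//   }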
template <class T, class Hash = absl::container_internal::hash_default_hash<T>,
class Eq = absl::container_internal::hash_default_eq<T>,
class Allocator = std::allocator<T>>
class flat_hash_set
: public absl::container_internal::raw_hash_set<
absl::container_internal::FlatHashSetPolicy<T>, Hash, Eq, Allocator> {
using Base = typename flat_hash_set::raw_hash_set;
public:
// Constructors and Assignment Operators
//
// A flat_hash_set supports the same overload set as `std::unordered_set`
// for construction and assignment:
//
// * Default constructor
//
// // No allocation for the table's elements is made.
// absl::flat_hash_set<std::string> set1;
//
// * Initializer List constructor
//
// absl::flat_hash_set<std::string> set2 =
// {{"huey"}, {"dewey"}, {"louie"},};
//
// * Copy constructor
//
// absl::flat_hash_set<std::string> set3(set2);
//
// * Copy assignment operator
//
// // Hash functor and Comparator are copied as well
// absl::flat_hash_set<std::string> set4;
// set4 = set3;
//
// * Move constructor
//
// // Move is guaranteed efficient
// absl::flat_hash_set<std::string> set5(std::move(set4));
//
// * Move assignment operator
//
// // May be efficient if allocators are compatible
// absl::flat_hash_set<std::string> set6;
// set6 = std::move(set5);
//
// * Range constructor
//
// std::vector<std::string> v = {"a", "b"};
// absl::flat_hash_set<std::string> set7(v.begin(), v.end());
flat_hash_set() {}
using Base::Base;
// flat_hash_set::begin()
//
// Returns an iterator to the beginning of the `flat_hash_set`.
using Base::begin;
// flat_hash_set::cbegin()
//
// Returns a const iterator to the beginning of the `flat_hash_set`.
using Base::cbegin;
// flat_hash_set::cend()
//
// Returns a const iterator to the end of the `flat_hash_set`.
using Base::cend;
// flat_hash_set::end()
//
// Returns an iterator to the end of the `flat_hash_set`.
using Base::end;
// flat_hash_set::capacity()
//
// Returns the number of element slots (assigned, deleted, and empty)
// available within the `flat_hash_set`.
//
// NOTE: this member function is particular to `absl::flat_hash_set` and is
// not provided in the `std::unordered_set` API.
using Base::capacity;
// flat_hash_set::empty()
//
// Returns whether or not the `flat_hash_set` is empty.
using Base::empty;
// flat_hash_set::max_size()
//
// Returns the largest theoretically possible number of elements within a
// `flat_hash_set` under current memory constraints. This value can be thought
// of as the largest value of `std::distance(begin(), end())` for a
// `flat_hash_set<T>`.
using Base::max_size;
// flat_hash_set::size()
//
// Returns the number of elements currently within the `flat_hash_set`.
using Base::size;
// flat_hash_set::clear()
//
// Removes all elements from the `flat_hash_set`. Invalidates any references,
// pointers, or iterators referring to contained elements.
//
// NOTE: this operation may shrink the underlying buffer. To avoid shrinking
// the underlying buffer call `erase(begin(), end())`.
using Base::clear;
// flat_hash_set::erase()
//
// Erases elements within the `flat_hash_set`. Erasing does not trigger a
// rehash. Overloads are listed below.
//
// void erase(const_iterator pos):
//
// Erases the element at `position` of the `flat_hash_set`, returning
// `void`.
//
// NOTE: returning `void` in this case is different than that of STL
// containers in general and `std::unordered_set` in particular (which
// return an iterator to the element following the erased element). If that
// iterator is needed, simply post-increment the iterator:
//
// set.erase(it++);
//
// iterator erase(const_iterator first, const_iterator last):
//
// Erases the elements in the open interval [`first`, `last`), returning an
// iterator pointing to `last`.
//
// size_type erase(const key_type& key):
//
// Erases the element with the matching key, if it exists.
using Base::erase;
// flat_hash_set::insert()
//
// Inserts an element of the specified value into the `flat_hash_set`,
// returning an iterator pointing to the newly inserted element, provided that
// an element with the given key does not already exist. If rehashing occurs
// due to the insertion, all iterators are invalidated. Overloads are listed
// below.
//
// std::pair<iterator,bool> insert(const T& value):
//
// Inserts a value into the `flat_hash_set`. Returns a pair consisting of an
// iterator to the inserted element (or to the element that prevented the
// insertion) and a bool denoting whether the insertion took place.
//
// std::pair<iterator,bool> insert(T&& value):
//
// Inserts a moveable value into the `flat_hash_set`. Returns a pair
// consisting of an iterator to the inserted element (or to the element that
// prevented the insertion) and a bool denoting whether the insertion took
// place.
//
// iterator insert(const_iterator hint, const T& value):
// iterator insert(const_iterator hint, T&& value):
//
// Inserts a value, using the position of `hint` as a non-binding suggestion
// for where to begin the insertion search. Returns an iterator to the
// inserted element, or to the existing element that prevented the
// insertion.
//
// void insert(InputIterator first, InputIterator last):
//
// Inserts a range of values [`first`, `last`).
//
// NOTE: Although the STL does not specify which element may be inserted if
// multiple keys compare equivalently, for `flat_hash_set` we guarantee the
// first match is inserted.
//
// void insert(std::initializer_list<T> ilist):
//
// Inserts the elements within the initializer list `ilist`.
//
// NOTE: Although the STL does not specify which element may be inserted if
// multiple keys compare equivalently within the initializer list, for
// `flat_hash_set` we guarantee the first match is inserted.
using Base::insert;
// flat_hash_set::emplace()
//
// Inserts an element of the specified value by constructing it in-place
// within the `flat_hash_set`, provided that no element with the given key
// already exists.
//
// The element may be constructed even if there already is an element with the
// key in the container, in which case the newly constructed element will be
// destroyed immediately.
//
// If rehashing occurs due to the insertion, all iterators are invalidated.
using Base::emplace;
// flat_hash_set::emplace_hint()
//
// Inserts an element of the specified value by constructing it in-place
// within the `flat_hash_set`, using the position of `hint` as a non-binding
// suggestion for where to begin the insertion search, and only inserts
// provided that no element with the given key already exists.
//
// The element may be constructed even if there already is an element with the
// key in the container, in which case the newly constructed element will be
// destroyed immediately.
//
// If rehashing occurs due to the insertion, all iterators are invalidated.
using Base::emplace_hint;
// flat_hash_set::extract()
//
// Extracts the indicated element, erasing it in the process, and returns it
// as a C++17-compatible node handle. Overloads are listed below.
//
// node_type extract(const_iterator position):
//
// Extracts the element at the indicated position and returns a node handle
// owning that extracted data.
//
// node_type extract(const key_type& x):
//
// Extracts the element with the key matching the passed key value and
// returns a node handle owning that extracted data. If the `flat_hash_set`
// does not contain an element with a matching key, this function returns an
// empty node handle.
using Base::extract;
// flat_hash_set::merge()
//
// Extracts elements from a given `source` flat hash set into this
// `flat_hash_set`. If the destination `flat_hash_set` already contains an
// element with an equivalent key, that element is not extracted.
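//
// Example (an illustrative sketch, not part of the upstream comments):
//
//   absl::flat_hash_set<int> dst = {1, 2};
//   absl::flat_hash_set<int> src = {2, 3};
//   dst.merge(src);  // dst == {1, 2, 3}; the duplicate 2 remains in src.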
using Base::merge;
// flat_hash_set::swap(flat_hash_set& other)
//
// Exchanges the contents of this `flat_hash_set` with those of the `other`
// flat hash set, avoiding invocation of any move, copy, or swap operations on
// individual elements.
//
// All iterators and references on the `flat_hash_set` remain valid, excepting
// for the past-the-end iterator, which is invalidated.
//
// `swap()` requires that the flat hash set's hashing and key equivalence
// functions be Swappable, and are exchanged using unqualified calls to
// non-member `swap()`. If the set's allocator has
// `std::allocator_traits<allocator_type>::propagate_on_container_swap::value`
// set to `true`, the allocators are also exchanged using an unqualified call
// to non-member `swap()`; otherwise, the allocators are not swapped.
using Base::swap;
// flat_hash_set::rehash(count)
//
// Rehashes the `flat_hash_set`, setting the number of slots to be at least
// the passed value. If the new number of slots increases the load factor more
// than the current maximum load factor
// (`count` < `size()` / `max_load_factor()`), then the new number of slots
// will be at least `size()` / `max_load_factor()`.
//
// To force a rehash, pass rehash(0).
//
// NOTE: unlike behavior in `std::unordered_set`, references are also
// invalidated upon a `rehash()`.
using Base::rehash;
// flat_hash_set::reserve(count)
//
// Sets the number of slots in the `flat_hash_set` to the number needed to
// accommodate at least `count` total elements without exceeding the current
// maximum load factor, and may rehash the container if needed.
using Base::reserve;
// flat_hash_set::contains()
//
// Determines whether an element comparing equal to the given `key` exists
// within the `flat_hash_set`, returning `true` if so or `false` otherwise.
using Base::contains;
// flat_hash_set::count(const Key& key) const
//
// Returns the number of elements comparing equal to the given `key` within
// the `flat_hash_set`. Note that this function will return either `1` or `0`
// since duplicate elements are not allowed within a `flat_hash_set`.
using Base::count;
// flat_hash_set::equal_range()
//
// Returns a closed range [first, last], defined by a `std::pair` of two
// iterators, containing all elements with the passed key in the
// `flat_hash_set`.
using Base::equal_range;
// flat_hash_set::find()
//
// Finds an element with the passed `key` within the `flat_hash_set`.
using Base::find;
// flat_hash_set::bucket_count()
//
// Returns the number of "buckets" within the `flat_hash_set`. Note that
// because a flat hash set contains all elements within its internal storage,
// this value simply equals the current capacity of the `flat_hash_set`.
using Base::bucket_count;
// flat_hash_set::load_factor()
//
// Returns the current load factor of the `flat_hash_set` (the average number
// of slots occupied with a value within the hash set).
using Base::load_factor;
// flat_hash_set::max_load_factor()
//
// Manages the maximum load factor of the `flat_hash_set`. Overloads are
// listed below.
//
// float flat_hash_set::max_load_factor()
//
// Returns the current maximum load factor of the `flat_hash_set`.
//
// void flat_hash_set::max_load_factor(float ml)
//
// Sets the maximum load factor of the `flat_hash_set` to the passed value.
//
// NOTE: This overload is provided only for API compatibility with the STL;
// `flat_hash_set` will ignore any set load factor and manage its rehashing
// internally as an implementation detail.
using Base::max_load_factor;
// flat_hash_set::get_allocator()
//
// Returns the allocator function associated with this `flat_hash_set`.
using Base::get_allocator;
// flat_hash_set::hash_function()
//
// Returns the hashing function used to hash the keys within this
// `flat_hash_set`.
using Base::hash_function;
// flat_hash_set::key_eq()
//
// Returns the function used to compare keys for equality.
using Base::key_eq;
};
namespace container_internal {
template <class T>
struct FlatHashSetPolicy {
using slot_type = T;
using key_type = T;
using init_type = T;
using constant_iterators = std::true_type;
template <class Allocator, class... Args>
static void construct(Allocator* alloc, slot_type* slot, Args&&... args) {
absl::allocator_traits<Allocator>::construct(*alloc, slot,
std::forward<Args>(args)...);
}
template <class Allocator>
static void destroy(Allocator* alloc, slot_type* slot) {
absl::allocator_traits<Allocator>::destroy(*alloc, slot);
}
template <class Allocator>
static void transfer(Allocator* alloc, slot_type* new_slot,
slot_type* old_slot) {
construct(alloc, new_slot, std::move(*old_slot));
destroy(alloc, old_slot);
}
static T& element(slot_type* slot) { return *slot; }
template <class F, class... Args>
static decltype(absl::container_internal::DecomposeValue(
std::declval<F>(), std::declval<Args>()...))
apply(F&& f, Args&&... args) {
return absl::container_internal::DecomposeValue(
std::forward<F>(f), std::forward<Args>(args)...);
}
static size_t space_used(const T*) { return 0; }
};
} // namespace container_internal
namespace container_algorithm_internal {
// Specialization of trait in absl/algorithm/container.h
template <class Key, class Hash, class KeyEqual, class Allocator>
struct IsUnorderedContainer<absl::flat_hash_set<Key, Hash, KeyEqual, Allocator>>
: std::true_type {};
} // namespace container_algorithm_internal
} // inline namespace lts_2019_08_08
} // namespace absl
#endif // ABSL_CONTAINER_FLAT_HASH_SET_H_

Some files were not shown because too many files have changed in this diff.