Snap for 8750474 from 5076d37ac5828fec183d0817f5dccebac528739a to tm-release

Change-Id: I95aa466e6b50d6cf04d8a54c795c48ae9ad3c86e
diff --git a/common/native/bpf_headers/include/bpf/BpfMap.h b/common/native/bpf_headers/include/bpf/BpfMap.h
index 86c0756..2bee2ee 100644
--- a/common/native/bpf_headers/include/bpf/BpfMap.h
+++ b/common/native/bpf_headers/include/bpf/BpfMap.h
@@ -49,16 +49,20 @@
   protected:
     // flag must be within BPF_OBJ_FLAG_MASK, ie. 0, BPF_F_RDONLY, BPF_F_WRONLY
     BpfMap<Key, Value>(const char* pathname, uint32_t flags) {
-        int map_fd = mapRetrieve(pathname, flags);
-        if (map_fd >= 0) mMapFd.reset(map_fd);
+        mMapFd.reset(mapRetrieve(pathname, flags));
+        if (mMapFd < 0) abort();
+        if (isAtLeastKernelVersion(4, 14, 0)) {
+            if (bpfGetFdKeySize(mMapFd) != sizeof(Key)) abort();
+            if (bpfGetFdValueSize(mMapFd) != sizeof(Value)) abort();
+        }
     }
 
   public:
     explicit BpfMap<Key, Value>(const char* pathname) : BpfMap<Key, Value>(pathname, 0) {}
 
     BpfMap<Key, Value>(bpf_map_type map_type, uint32_t max_entries, uint32_t map_flags = 0) {
-        int map_fd = createMap(map_type, sizeof(Key), sizeof(Value), max_entries, map_flags);
-        if (map_fd >= 0) mMapFd.reset(map_fd);
+        mMapFd.reset(createMap(map_type, sizeof(Key), sizeof(Value), max_entries, map_flags));
+        if (mMapFd < 0) abort();
     }
 
     base::Result<Key> getFirstKey() const {
@@ -99,15 +103,38 @@
         return {};
     }
 
+  protected:
+    [[clang::reinitializes]] base::Result<void> init(const char* path, int fd) {
+        mMapFd.reset(fd);
+        if (mMapFd == -1) {
+            return ErrnoErrorf("Pinned map not accessible or does not exist: ({})", path);
+        }
+        if (isAtLeastKernelVersion(4, 14, 0)) {
+            // Normally we would return an error here instead of calling abort(),
+            // but a key/value type mismatch cannot happen at runtime without a
+            // massive code bug, so it is better to just blow the system up and
+            // let the developer fix it: crashes are much more likely to be
+            // noticed than logs and missing functionality.
+            if (bpfGetFdKeySize(mMapFd) != sizeof(Key)) abort();
+            if (bpfGetFdValueSize(mMapFd) != sizeof(Value)) abort();
+        }
+        return {};
+    }
+
+  public:
     // Function that tries to get the map from a pinned path.
-    base::Result<void> init(const char* path);
+    [[clang::reinitializes]] base::Result<void> init(const char* path) {
+        return init(path, mapRetrieveRW(path));
+    }
+
 
 #ifdef TEST_BPF_MAP
     // due to Android SELinux limitations which prevent map creation by anyone besides the bpfloader
     // this should only ever be used by test code; it is equivalent to:
     //   .reset(createMap(type, keysize, valuesize, max_entries, map_flags))
     // TODO: derive map_flags from BpfMap vs BpfMapRO
-    base::Result<void> resetMap(bpf_map_type map_type, uint32_t max_entries, uint32_t map_flags = 0) {
+    [[clang::reinitializes]] base::Result<void> resetMap(bpf_map_type map_type,
+                                                         uint32_t max_entries,
+                                                         uint32_t map_flags = 0) {
         int map_fd = createMap(map_type, sizeof(Key), sizeof(Value), max_entries, map_flags);
         if (map_fd < 0) {
              auto err = ErrnoErrorf("Unable to create map.");
@@ -152,14 +179,23 @@
 
     // Move assignment operator
     BpfMap<Key, Value>& operator=(BpfMap<Key, Value>&& other) noexcept {
-        mMapFd = std::move(other.mMapFd);
-        other.reset(-1);
+        if (this != &other) {
+            mMapFd = std::move(other.mMapFd);
+            other.reset();
+        }
         return *this;
     }
 
     void reset(base::unique_fd fd) = delete;
 
-    void reset(int fd) { mMapFd.reset(fd); }
+    [[clang::reinitializes]] void reset(int fd = -1) {
+        mMapFd.reset(fd);
+        if ((fd >= 0) && isAtLeastKernelVersion(4, 14, 0)) {
+            if (bpfGetFdKeySize(mMapFd) != sizeof(Key)) abort();
+            if (bpfGetFdValueSize(mMapFd) != sizeof(Value)) abort();
+            if (bpfGetFdMapFlags(mMapFd) != 0) abort(); // TODO: fix for BpfMapRO
+        }
+    }
 
     bool isValid() const { return mMapFd != -1; }
 
@@ -195,15 +231,6 @@
 };
 
 template <class Key, class Value>
-base::Result<void> BpfMap<Key, Value>::init(const char* path) {
-    mMapFd.reset(mapRetrieveRW(path));
-    if (mMapFd == -1) {
-        return ErrnoErrorf("Pinned map not accessible or does not exist: ({})", path);
-    }
-    return {};
-}
-
-template <class Key, class Value>
 base::Result<void> BpfMap<Key, Value>::iterate(
         const std::function<base::Result<void>(const Key& key, const BpfMap<Key, Value>& map)>&
                 filter) const {
@@ -269,8 +296,15 @@
 template <class Key, class Value>
 class BpfMapRO : public BpfMap<Key, Value> {
   public:
+    BpfMapRO<Key, Value>() {}
+
     explicit BpfMapRO<Key, Value>(const char* pathname)
         : BpfMap<Key, Value>(pathname, BPF_F_RDONLY) {}
+
+    // Function that tries to get the map from a pinned path.
+    [[clang::reinitializes]] base::Result<void> init(const char* path) {
+        return BpfMap<Key,Value>::init(path, mapRetrieveRO(path));
+    }
 };
 
 }  // namespace bpf
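
A minimal user-space sketch of what the new constructor- and init-time checks catch. The pinned path, key/value types, and function name below are hypothetical; BpfMapRO's default constructor, its read-only init(), and the abort-on-mismatch behaviour are the ones added above. On kernels >= 4.14, a mismatch between the template parameters and the kernel-reported key/value sizes now aborts immediately instead of silently misreading map contents.

    #include <stdint.h>
    #include <bpf/BpfMap.h>

    using android::bpf::BpfMapRO;

    // Hypothetical pinned map, created by the bpfloader with key=uint32_t, value=uint64_t.
    constexpr char kStatsMapPath[] = "/sys/fs/bpf/map_example_stats_map";

    void openPinnedMap() {
        // Sizes match the kernel-side definition: the constructor succeeds.
        BpfMapRO<uint32_t, uint64_t> stats(kStatsMapPath);

        // Wrong value type: bpfGetFdValueSize(fd) != sizeof(uint32_t), so on a
        // >= 4.14 kernel the constructor calls abort() instead of returning a
        // map object that would misinterpret every value it reads.
        // BpfMapRO<uint32_t, uint32_t> bad(kStatsMapPath);  // would abort()

        // Two-phase construction: init() returns a Result (rather than aborting)
        // when the pinned path itself is missing or inaccessible.
        BpfMapRO<uint32_t, uint64_t> lazy;
        if (!lazy.init(kStatsMapPath).ok()) {
            // handle "Pinned map not accessible or does not exist"
        }
    }
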
diff --git a/common/native/bpf_headers/include/bpf/BpfUtils.h b/common/native/bpf_headers/include/bpf/BpfUtils.h
index 8f1b9a2..7801c3e 100644
--- a/common/native/bpf_headers/include/bpf/BpfUtils.h
+++ b/common/native/bpf_headers/include/bpf/BpfUtils.h
@@ -92,7 +92,7 @@
 
 #define KVER(a, b, c) (((a) << 24) + ((b) << 16) + (c))
 
-static inline unsigned kernelVersion() {
+static inline unsigned uncachedKernelVersion() {
     struct utsname buf;
     int ret = uname(&buf);
     if (ret) return 0;
@@ -108,6 +108,11 @@
     return KVER(kver_major, kver_minor, kver_sub);
 }
 
+static inline unsigned kernelVersion() {
+    static unsigned kver = uncachedKernelVersion();
+    return kver;
+}
+
 static inline bool isAtLeastKernelVersion(unsigned major, unsigned minor, unsigned sub) {
     return kernelVersion() >= KVER(major, minor, sub);
 }
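
A small caller-side sketch (function name hypothetical) of what the caching buys: kernelVersion() now memoizes uncachedKernelVersion() in a function-local static, which is initialized exactly once and thread-safely, so repeated isAtLeastKernelVersion() checks no longer re-issue the uname() syscall.

    #include <bpf/BpfUtils.h>

    using android::bpf::isAtLeastKernelVersion;

    void configureDatapath() {
        // uname() runs at most once per process; later calls reuse the cached value.
        if (isAtLeastKernelVersion(4, 14, 0)) {
            // safe to rely on BPF_OBJ_GET_INFO_BY_FD based checks
        }
        if (isAtLeastKernelVersion(4, 19, 0)) {
            // hypothetical newer-kernel-only feature path
        }
    }
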
diff --git a/common/native/bpf_headers/include/bpf/bpf_helpers.h b/common/native/bpf_headers/include/bpf/bpf_helpers.h
index ac9f9bc..10686a2 100644
--- a/common/native/bpf_headers/include/bpf/bpf_helpers.h
+++ b/common/native/bpf_headers/include/bpf/bpf_helpers.h
@@ -19,6 +19,17 @@
  *                                                                            *
  ******************************************************************************/
 
+// The actual versions of the bpfloader that shipped in various Android releases
+
+// Android P/Q/R: BpfLoader was initially part of netd; it was later split out
+// into a standalone binary, but remained unversioned.
+
+// Android S / 12 (api level 31) - added 'tethering' mainline eBPF support
+#define BPFLOADER_S_VERSION 2u
+
+// Android T / 13 Beta 3 (api level 33) - added support for 'netd_shared'
+#define BPFLOADER_T_BETA3_VERSION 13u
+
 /* For mainline module use, you can #define BPFLOADER_{MIN/MAX}_VER
  * before #include "bpf_helpers.h" to change which bpfloaders will
  * process the resulting .o file.
@@ -126,11 +137,12 @@
                 ____btf_map_##name = { }
 
 /* type safe macro to declare a map and related accessor functions */
-#define DEFINE_BPF_MAP_UGM(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries, usr, grp, md)     \
+#define DEFINE_BPF_MAP_EXT(the_map, TYPE, KeyType, ValueType, num_entries, usr, grp, md,         \
+                           selinux, pindir, share)                                               \
     const struct bpf_map_def SECTION("maps") the_map = {                                         \
             .type = BPF_MAP_TYPE_##TYPE,                                                         \
-            .key_size = sizeof(TypeOfKey),                                                       \
-            .value_size = sizeof(TypeOfValue),                                                   \
+            .key_size = sizeof(KeyType),                                                         \
+            .value_size = sizeof(ValueType),                                                     \
             .max_entries = (num_entries),                                                        \
             .map_flags = 0,                                                                      \
             .uid = (usr),                                                                        \
@@ -140,34 +152,40 @@
             .bpfloader_max_ver = DEFAULT_BPFLOADER_MAX_VER,                                      \
             .min_kver = KVER_NONE,                                                               \
             .max_kver = KVER_INF,                                                                \
+            .selinux_context = selinux,                                                          \
+            .pin_subdir = pindir,                                                                \
+            .shared = share,                                                                     \
     };                                                                                           \
-    BPF_ANNOTATE_KV_PAIR(the_map, TypeOfKey, TypeOfValue);                                       \
+    BPF_ANNOTATE_KV_PAIR(the_map, KeyType, ValueType);                                           \
                                                                                                  \
-    static inline __always_inline __unused TypeOfValue* bpf_##the_map##_lookup_elem(             \
-            const TypeOfKey* k) {                                                                \
+    static inline __always_inline __unused ValueType* bpf_##the_map##_lookup_elem(               \
+            const KeyType* k) {                                                                  \
         return bpf_map_lookup_elem_unsafe(&the_map, k);                                          \
     };                                                                                           \
                                                                                                  \
     static inline __always_inline __unused int bpf_##the_map##_update_elem(                      \
-            const TypeOfKey* k, const TypeOfValue* v, unsigned long long flags) {                \
+            const KeyType* k, const ValueType* v, unsigned long long flags) {                    \
         return bpf_map_update_elem_unsafe(&the_map, k, v, flags);                                \
     };                                                                                           \
                                                                                                  \
-    static inline __always_inline __unused int bpf_##the_map##_delete_elem(const TypeOfKey* k) { \
+    static inline __always_inline __unused int bpf_##the_map##_delete_elem(const KeyType* k) {   \
         return bpf_map_delete_elem_unsafe(&the_map, k);                                          \
     };
 
-#define DEFINE_BPF_MAP(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries) \
-    DEFINE_BPF_MAP_UGM(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries, AID_ROOT, AID_ROOT, 0600)
+#define DEFINE_BPF_MAP_UGM(the_map, TYPE, KeyType, ValueType, num_entries, usr, grp, md) \
+    DEFINE_BPF_MAP_EXT(the_map, TYPE, KeyType, ValueType, num_entries, usr, grp, md, "", "", false)
 
-#define DEFINE_BPF_MAP_GWO(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries, gid) \
-    DEFINE_BPF_MAP_UGM(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries, AID_ROOT, gid, 0620)
+#define DEFINE_BPF_MAP(the_map, TYPE, KeyType, ValueType, num_entries) \
+    DEFINE_BPF_MAP_UGM(the_map, TYPE, KeyType, ValueType, num_entries, AID_ROOT, AID_ROOT, 0600)
 
-#define DEFINE_BPF_MAP_GRO(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries, gid) \
-    DEFINE_BPF_MAP_UGM(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries, AID_ROOT, gid, 0640)
+#define DEFINE_BPF_MAP_GWO(the_map, TYPE, KeyType, ValueType, num_entries, gid) \
+    DEFINE_BPF_MAP_UGM(the_map, TYPE, KeyType, ValueType, num_entries, AID_ROOT, gid, 0620)
 
-#define DEFINE_BPF_MAP_GRW(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries, gid) \
-    DEFINE_BPF_MAP_UGM(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries, AID_ROOT, gid, 0660)
+#define DEFINE_BPF_MAP_GRO(the_map, TYPE, KeyType, ValueType, num_entries, gid) \
+    DEFINE_BPF_MAP_UGM(the_map, TYPE, KeyType, ValueType, num_entries, AID_ROOT, gid, 0640)
+
+#define DEFINE_BPF_MAP_GRW(the_map, TYPE, KeyType, ValueType, num_entries, gid) \
+    DEFINE_BPF_MAP_UGM(the_map, TYPE, KeyType, ValueType, num_entries, AID_ROOT, gid, 0660)
 
 static int (*bpf_probe_read)(void* dst, int size, void* unsafe_ptr) = (void*) BPF_FUNC_probe_read;
 static int (*bpf_probe_read_str)(void* dst, int size, void* unsafe_ptr) = (void*) BPF_FUNC_probe_read_str;
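
An illustrative use of the renamed and extended map macros in a BPF program source (map names, types, and the AID_SYSTEM group are assumptions; the macros and their parameter order are the ones defined above). Existing call sites are unaffected, because DEFINE_BPF_MAP_UGM now simply forwards selinux_context = "", pin_subdir = "", shared = false to DEFINE_BPF_MAP_EXT.

    // In a BPF program source that #includes bpf_helpers.h:

    // Unchanged call site: GRW still expands via DEFINE_BPF_MAP_UGM, which now
    // passes "", "", false down to the EXT macro.
    DEFINE_BPF_MAP_GRW(example_stats_map, HASH, uint32_t, uint64_t, 1024, AID_SYSTEM)

    // New-style call site using the extended macro directly; shared = true pins
    // the map with an empty 'file' component so other .o files can reference it.
    DEFINE_BPF_MAP_EXT(example_shared_map, ARRAY, uint32_t, uint64_t, 16,
                       AID_ROOT, AID_SYSTEM, 0660, "", "", true)

Each definition also generates the type-safe bpf_<map>_lookup_elem / _update_elem / _delete_elem accessors shown in the macro body above.
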
@@ -178,8 +196,8 @@
 static unsigned long long (*bpf_get_current_uid_gid)(void) = (void*) BPF_FUNC_get_current_uid_gid;
 static unsigned long long (*bpf_get_smp_processor_id)(void) = (void*) BPF_FUNC_get_smp_processor_id;
 
-#define DEFINE_BPF_PROG_KVER_RANGE_OPT(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, max_kv, \
-                                       opt)                                                        \
+#define DEFINE_BPF_PROG_EXT(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, max_kv, opt,       \
+                            selinux, pindir)                                                       \
     const struct bpf_prog_def SECTION("progs") the_prog##_def = {                                  \
             .uid = (prog_uid),                                                                     \
             .gid = (prog_gid),                                                                     \
@@ -188,10 +206,16 @@
             .optional = (opt),                                                                     \
             .bpfloader_min_ver = DEFAULT_BPFLOADER_MIN_VER,                                        \
             .bpfloader_max_ver = DEFAULT_BPFLOADER_MAX_VER,                                        \
+            .selinux_context = selinux,                                                            \
+            .pin_subdir = pindir,                                                                  \
     };                                                                                             \
     SECTION(SECTION_NAME)                                                                          \
     int the_prog
 
+#define DEFINE_BPF_PROG_KVER_RANGE_OPT(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, max_kv, \
+                                       opt) \
+    DEFINE_BPF_PROG_EXT(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, max_kv, opt, "", "")
+
 // Programs (here used in the sense of functions/sections) marked optional are allowed to fail
 // to load (for example due to missing kernel patches).
 // The bpfloader will just ignore these failures and continue processing the next section.
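
Similarly, a hedged sketch of a program definition (section name, ownership, return value, and kernel bounds are illustrative; KVER()/KVER_INF are the kernel-version helpers used elsewhere in these headers). DEFINE_BPF_PROG_KVER_RANGE_OPT keeps its old signature and now just forwards an empty selinux_context and pin_subdir to DEFINE_BPF_PROG_EXT.

    DEFINE_BPF_PROG_KVER_RANGE_OPT("skfilter/example/prog", AID_ROOT, AID_SYSTEM,
                                   example_prog, KVER(4, 14, 0), KVER_INF,
                                   false /* not optional */)
    (struct __sk_buff* skb) {
        return 1;  // accept
    }
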
diff --git a/common/native/bpf_headers/include/bpf/bpf_map_def.h b/common/native/bpf_headers/include/bpf/bpf_map_def.h
index 1371668..14a0295 100644
--- a/common/native/bpf_headers/include/bpf/bpf_map_def.h
+++ b/common/native/bpf_headers/include/bpf/bpf_map_def.h
@@ -111,6 +111,15 @@
 // BPF wants 8, but 32-bit x86 wants 4
 //_Static_assert(_Alignof(unsigned long long) == 8, "_Alignof unsigned long long != 8");
 
+// Length of strings (incl. selinux_context and pin_subdir)
+// in the bpf_map_def and bpf_prog_def structs.
+//
+// WARNING: YOU CANNOT *EVER* CHANGE THESE,
+// as doing so would change the structure size in backwards-incompatible ways
+// and break mainline module loading on older Android T devices.
+#define BPF_SELINUX_CONTEXT_CHAR_ARRAY_SIZE 32
+#define BPF_PIN_SUBDIR_CHAR_ARRAY_SIZE 32
+
 /*
  * Map structure to be used by Android eBPF C programs. The Android eBPF loader
  * uses this structure from eBPF object to create maps at boot time.
@@ -142,14 +151,33 @@
     unsigned int bpfloader_min_ver;  // if missing, defaults to 0, ie. v0.0
     unsigned int bpfloader_max_ver;  // if missing, defaults to 0x10000, ie. v1.0
 
-    // The following fields were added in version 0.2
+    // The following fields were added in version 0.2 (S)
     // kernelVersion() must be >= min_kver and < max_kver
     unsigned int min_kver;
     unsigned int max_kver;
+
+    // The following fields were added in version 0.18 (T)
+    //
+    // These are fixed length strings, padded with null bytes
+    //
+    // Warning: supported values depend on .o location
+    // (additionally a newer Android OS and/or bpfloader may support more values)
+    //
+    // overrides default selinux context (which is based on pin subdir)
+    char selinux_context[BPF_SELINUX_CONTEXT_CHAR_ARRAY_SIZE];
+    //
+    // overrides default prefix (which is based on .o location)
+    char pin_subdir[BPF_PIN_SUBDIR_CHAR_ARRAY_SIZE];
+
+    bool shared;  // use empty string as 'file' component of pin path - allows cross .o map sharing
+    char pad0[3];  // manually pad up to 4 byte alignment, may be used for extensions in the future
 };
 
+_Static_assert(sizeof(((struct bpf_map_def *)0)->selinux_context) == 32, "must be 32 bytes");
+_Static_assert(sizeof(((struct bpf_map_def *)0)->pin_subdir) == 32, "must be 32 bytes");
+
 // This needs to be updated whenever the above structure definition is expanded.
-_Static_assert(sizeof(struct bpf_map_def) == 48, "sizeof struct bpf_map_def != 48");
+_Static_assert(sizeof(struct bpf_map_def) == 116, "sizeof struct bpf_map_def != 116");
 _Static_assert(__alignof__(struct bpf_map_def) == 4, "__alignof__ struct bpf_map_def != 4");
 _Static_assert(_Alignof(struct bpf_map_def) == 4, "_Alignof struct bpf_map_def != 4");
 
@@ -168,10 +196,15 @@
     unsigned int bpfloader_min_ver;  // if missing, defaults to 0, ie. v0.0
     unsigned int bpfloader_max_ver;  // if missing, defaults to 0x10000, ie. v1.0
 
-    // No new fields in version 0.2
+    // The following fields were added in version 0.18, see description up above in bpf_map_def
+    char selinux_context[BPF_SELINUX_CONTEXT_CHAR_ARRAY_SIZE];
+    char pin_subdir[BPF_PIN_SUBDIR_CHAR_ARRAY_SIZE];
 };
 
+_Static_assert(sizeof(((struct bpf_prog_def *)0)->selinux_context) == 32, "must be 32 bytes");
+_Static_assert(sizeof(((struct bpf_prog_def *)0)->pin_subdir) == 32, "must be 32 bytes");
+
 // This needs to be updated whenever the above structure definition is expanded.
-_Static_assert(sizeof(struct bpf_prog_def) == 28, "sizeof struct bpf_prog_def != 28");
+_Static_assert(sizeof(struct bpf_prog_def) == 92, "sizeof struct bpf_prog_def != 92");
 _Static_assert(__alignof__(struct bpf_prog_def) == 4, "__alignof__ struct bpf_prog_def != 4");
 _Static_assert(_Alignof(struct bpf_prog_def) == 4, "_Alignof struct bpf_prog_def != 4");
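
The updated _Static_assert values follow directly from the fields added for version 0.18; a quick check of the arithmetic (all members are 4-byte aligned, so there is no hidden padding beyond the explicit pad0):

    // struct bpf_map_def:  48 (v0.2 layout, through max_kver)
    //                    + 32 (selinux_context[32])
    //                    + 32 (pin_subdir[32])
    //                    +  1 (bool shared)
    //                    +  3 (char pad0[3], explicit padding to 4-byte alignment)
    //                    = 116  -> sizeof(struct bpf_map_def) == 116
    //
    // struct bpf_prog_def: 28 (v0.2 layout)
    //                    + 32 (selinux_context[32])
    //                    + 32 (pin_subdir[32])
    //                    = 92   -> sizeof(struct bpf_prog_def) == 92
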
diff --git a/common/native/bpf_syscall_wrappers/include/BpfSyscallWrappers.h b/common/native/bpf_syscall_wrappers/include/BpfSyscallWrappers.h
index abf83da..4b29c44 100644
--- a/common/native/bpf_syscall_wrappers/include/BpfSyscallWrappers.h
+++ b/common/native/bpf_syscall_wrappers/include/BpfSyscallWrappers.h
@@ -150,6 +150,36 @@
                                 });
 }
 
+// requires 4.14+ kernel
+
+#define DEFINE_BPF_GET_FD_INFO(NAME, FIELD) \
+inline int bpfGetFd ## NAME(const BPF_FD_TYPE map_fd) { \
+    struct bpf_map_info map_info = {}; \
+    union bpf_attr attr = { .info = { \
+        .bpf_fd = BPF_FD_TO_U32(map_fd), \
+        .info_len = sizeof(map_info), \
+        .info = ptr_to_u64(&map_info), \
+    }}; \
+    int rv = bpf(BPF_OBJ_GET_INFO_BY_FD, attr); \
+    if (rv) return rv; \
+    if (attr.info.info_len < offsetof(bpf_map_info, FIELD) + sizeof(map_info.FIELD)) { \
+        errno = EOPNOTSUPP; \
+        return -1; \
+    }; \
+    return map_info.FIELD; \
+}
+
+// All 6 of these fields are already present in Linux v4.14 (even ACK 4.14-P)
+// while BPF_OBJ_GET_INFO_BY_FD is not implemented at all in v4.9 (even ACK 4.9-Q)
+DEFINE_BPF_GET_FD_INFO(MapType, type)            // int bpfGetFdMapType(const BPF_FD_TYPE map_fd)
+DEFINE_BPF_GET_FD_INFO(MapId, id)                // int bpfGetFdMapId(const BPF_FD_TYPE map_fd)
+DEFINE_BPF_GET_FD_INFO(KeySize, key_size)        // int bpfGetFdKeySize(const BPF_FD_TYPE map_fd)
+DEFINE_BPF_GET_FD_INFO(ValueSize, value_size)    // int bpfGetFdValueSize(const BPF_FD_TYPE map_fd)
+DEFINE_BPF_GET_FD_INFO(MaxEntries, max_entries)  // int bpfGetFdMaxEntries(const BPF_FD_TYPE map_fd)
+DEFINE_BPF_GET_FD_INFO(MapFlags, map_flags)      // int bpfGetFdMapFlags(const BPF_FD_TYPE map_fd)
+
+#undef DEFINE_BPF_GET_FD_INFO
+
 }  // namespace bpf
 }  // namespace android
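
A short sketch of using the new accessors from other user-space code (the helper name is hypothetical; bpfGetFdKeySize/bpfGetFdValueSize, their int return convention, and the -1/EOPNOTSUPP fallback are defined above, and passing a raw int assumes BPF_FD_TYPE is constructible from one). Because BPF_OBJ_GET_INFO_BY_FD needs a 4.14+ kernel, the check is skipped on older kernels, mirroring what BpfMap's constructors now do.

    #include <stddef.h>
    #include <BpfSyscallWrappers.h>
    #include <bpf/BpfUtils.h>  // isAtLeastKernelVersion()

    // Hypothetical validation helper mirroring BpfMap's new constructor checks.
    static bool mapFdMatches(int fd, size_t key_size, size_t value_size) {
        using namespace android::bpf;
        if (!isAtLeastKernelVersion(4, 14, 0)) return true;  // cannot verify on < 4.14
        return bpfGetFdKeySize(fd) == static_cast<int>(key_size) &&
               bpfGetFdValueSize(fd) == static_cast<int>(value_size);
    }
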
 
diff --git a/common/native/tcutils/kernelversion.h b/common/native/tcutils/kernelversion.h
index 3be1ad2..492444a 100644
--- a/common/native/tcutils/kernelversion.h
+++ b/common/native/tcutils/kernelversion.h
@@ -32,7 +32,7 @@
 
 namespace android {
 
-static inline unsigned kernelVersion() {
+static inline unsigned uncachedKernelVersion() {
   struct utsname buf;
   int ret = uname(&buf);
   if (ret)
@@ -51,6 +51,11 @@
   return KVER(kver_major, kver_minor, kver_sub);
 }
 
+static inline unsigned kernelVersion() {
+  static unsigned kver = uncachedKernelVersion();
+  return kver;
+}
+
 static inline bool isAtLeastKernelVersion(unsigned major, unsigned minor,
                                           unsigned sub) {
   return kernelVersion() >= KVER(major, minor, sub);