BPF.cc

/*
 * Copyright (c) 2016 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <linux/bpf.h>
#include <linux/perf_event.h>
#include <unistd.h>
#include <cstdio>
#include <cstring>
#include <exception>
#include <fcntl.h>
#include <iostream>
#include <memory>
#include <sstream>
#include <sys/stat.h>
#include <sys/types.h>
#include <utility>
#include <vector>

#include "bcc_exception.h"
#include "bcc_elf.h"
#include "bcc_syms.h"
#include "bpf_module.h"
#include "common.h"
#include "libbpf.h"
#include "perf_reader.h"
#include "syms.h"
#include "table_storage.h"
#include "usdt.h"

#include "BPF.h"

namespace {
/*
 * Kernels ~4.20 and later support specifying the ref_ctr_offset as an argument
 * to attaching a uprobe, which negates the need to seek to this memory offset
 * in userspace to manage semaphores, as the kernel will do it for us. This
 * helper function checks if this support is available by reading the uprobe
 * format for this value, added in a6ca88b241d5e929e6e60b12ad8cd288f0ffa
 */
bool uprobe_ref_ctr_supported() {
  const char *ref_ctr_pmu_path =
      "/sys/bus/event_source/devices/uprobe/format/ref_ctr_offset";
  const char *ref_ctr_pmu_expected = "config:32-63\0";
  char ref_ctr_pmu_fmt[64];  // in Linux source this buffer is compared vs
                             // PAGE_SIZE, but 64 is probably ample
  int fd = open(ref_ctr_pmu_path, O_RDONLY);
  if (fd < 0)
    return false;

  int ret = read(fd, ref_ctr_pmu_fmt, sizeof(ref_ctr_pmu_fmt));
  close(fd);
  if (ret < 0) {
    return false;
  }
  if (strncmp(ref_ctr_pmu_expected, ref_ctr_pmu_fmt,
              strlen(ref_ctr_pmu_expected)) == 0) {
    return true;
  }
  return false;
}
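
// Note: this check re-reads sysfs on every call; the USDT attach/detach paths
// below invoke it once per operation, so the worst case is one small read per
// probe.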
}  // namespace

namespace ebpf {

std::string uint_to_hex(uint64_t value) {
  std::stringstream ss;
  ss << std::hex << value;
  return ss.str();
}

std::string sanitize_str(std::string str, bool (*validator)(char),
                         char replacement = '_') {
  for (size_t i = 0; i < str.length(); i++)
    if (!validator(str[i]))
      str[i] = replacement;
  return str;
}

StatusTuple BPF::init_usdt(const USDT& usdt) {
  USDT u(usdt);
  StatusTuple init_stp = u.init();
  if (!init_stp.ok()) {
    return init_stp;
  }

  usdt_.push_back(std::move(u));
  all_bpf_program_ += usdt_.back().program_text_;
  return StatusTuple::OK();
}

void BPF::init_fail_reset() {
  usdt_.clear();
  all_bpf_program_ = "";
}

StatusTuple BPF::init(const std::string& bpf_program,
                      const std::vector<std::string>& cflags,
                      const std::vector<USDT>& usdt) {
  usdt_.reserve(usdt.size());
  for (const auto& u : usdt) {
    StatusTuple init_stp = init_usdt(u);
    if (!init_stp.ok()) {
      init_fail_reset();
      return init_stp;
    }
  }

  std::vector<const char*> flags;
  for (const auto& c : cflags)
    flags.push_back(c.c_str());
  all_bpf_program_ += bpf_program;
  if (bpf_module_->load_string(all_bpf_program_, flags.data(),
                               flags.size()) != 0) {
    init_fail_reset();
    return StatusTuple(-1, "Unable to initialize BPF program");
  }

  return StatusTuple::OK();
}
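
// Usage sketch (default arguments for cflags/usdt are declared in BPF.h, not
// shown here), illustrating how a caller typically drives init():
//   ebpf::BPF bpf;
//   auto st = bpf.init("int on_clone(void *ctx) { return 0; }");
//   if (!st.ok()) { /* inspect st.msg() */ }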

BPF::~BPF() {
  auto res = detach_all();
  if (!res.ok())
    std::cerr << "Failed to detach all probes on destruction: " << std::endl
              << res.msg() << std::endl;
  bcc_free_buildsymcache(bsymcache_);
  bsymcache_ = NULL;
}
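
// detach_all() walks every bookkeeping map (kprobes, uprobes, tracepoints,
// perf buffers/arrays/events, loaded functions), releases each resource, and
// aggregates all failures into one error message instead of stopping early.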

StatusTuple BPF::detach_all() {
  bool has_error = false;
  std::string error_msg;

  for (auto& it : kprobes_) {
    auto res = detach_kprobe_event(it.first, it.second);
    if (!res.ok()) {
      error_msg += "Failed to detach kprobe event " + it.first + ": ";
      error_msg += res.msg() + "\n";
      has_error = true;
    }
  }

  for (auto& it : uprobes_) {
    auto res = detach_uprobe_event(it.first, it.second);
    if (!res.ok()) {
      error_msg += "Failed to detach uprobe event " + it.first + ": ";
      error_msg += res.msg() + "\n";
      has_error = true;
    }
  }

  for (auto& it : tracepoints_) {
    auto res = detach_tracepoint_event(it.first, it.second);
    if (!res.ok()) {
      error_msg += "Failed to detach Tracepoint " + it.first + ": ";
      error_msg += res.msg() + "\n";
      has_error = true;
    }
  }

  for (auto& it : raw_tracepoints_) {
    auto res = detach_raw_tracepoint_event(it.first, it.second);
    if (!res.ok()) {
      error_msg += "Failed to detach Raw tracepoint " + it.first + ": ";
      error_msg += res.msg() + "\n";
      has_error = true;
    }
  }

  for (auto& it : perf_buffers_) {
    auto res = it.second->close_all_cpu();
    if (!res.ok()) {
      error_msg += "Failed to close perf buffer " + it.first + ": ";
      error_msg += res.msg() + "\n";
      has_error = true;
    }
    delete it.second;
  }

  for (auto& it : perf_event_arrays_) {
    auto res = it.second->close_all_cpu();
    if (!res.ok()) {
      error_msg += "Failed to close perf event array " + it.first + ": ";
      error_msg += res.msg() + "\n";
      has_error = true;
    }
    delete it.second;
  }

  for (auto& it : perf_events_) {
    auto res = detach_perf_event_all_cpu(it.second);
    if (!res.ok()) {
      error_msg += res.msg() + "\n";
      has_error = true;
    }
  }

  for (auto& it : funcs_) {
    int res = close(it.second);
    if (res != 0) {
      error_msg += "Failed to unload BPF program for " + it.first + ": ";
      error_msg += std::string(std::strerror(errno)) + "\n";
      has_error = true;
    }
  }

  if (has_error)
    return StatusTuple(-1, error_msg);
  else
    return StatusTuple::OK();
}

StatusTuple BPF::attach_kprobe(const std::string& kernel_func,
                               const std::string& probe_func,
                               uint64_t kernel_func_offset,
                               bpf_probe_attach_type attach_type,
                               int maxactive) {
  std::string probe_event = get_kprobe_event(kernel_func, attach_type);
  if (kprobes_.find(probe_event) != kprobes_.end())
    return StatusTuple(-1, "kprobe %s already attached", probe_event.c_str());

  int probe_fd;
  TRY2(load_func(probe_func, BPF_PROG_TYPE_KPROBE, probe_fd));

  int res_fd = bpf_attach_kprobe(probe_fd, attach_type, probe_event.c_str(),
                                 kernel_func.c_str(), kernel_func_offset,
                                 maxactive);

  if (res_fd < 0) {
    TRY2(unload_func(probe_func));
    return StatusTuple(-1, "Unable to attach %skprobe for %s using %s",
                       attach_type_debug(attach_type).c_str(),
                       kernel_func.c_str(), probe_func.c_str());
  }

  open_probe_t p = {};
  p.perf_event_fd = res_fd;
  p.func = probe_func;
  kprobes_[probe_event] = std::move(p);
  return StatusTuple::OK();
}
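
// Usage sketch (assuming the default arguments declared in BPF.h): attach the
// loaded program "on_vfs_read" at the entry of vfs_read, then remove it:
//   bpf.attach_kprobe("vfs_read", "on_vfs_read");
//   bpf.detach_kprobe("vfs_read");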

StatusTuple BPF::attach_uprobe(const std::string& binary_path,
                               const std::string& symbol,
                               const std::string& probe_func,
                               uint64_t symbol_addr,
                               bpf_probe_attach_type attach_type, pid_t pid,
                               uint64_t symbol_offset,
                               uint32_t ref_ctr_offset) {
  if (symbol_addr != 0 && symbol_offset != 0)
    return StatusTuple(
        -1, "Attaching uprobe with addr %lx and offset %lx is not supported",
        symbol_addr, symbol_offset);

  std::string module;
  uint64_t offset;
  TRY2(check_binary_symbol(binary_path, symbol, symbol_addr, module, offset,
                           pid, symbol_offset));

  std::string probe_event = get_uprobe_event(module, offset, attach_type, pid);
  if (uprobes_.find(probe_event) != uprobes_.end())
    return StatusTuple(-1, "uprobe %s already attached", probe_event.c_str());

  int probe_fd;
  TRY2(load_func(probe_func, BPF_PROG_TYPE_KPROBE, probe_fd));

  int res_fd = bpf_attach_uprobe(probe_fd, attach_type, probe_event.c_str(),
                                 module.c_str(), offset, pid, ref_ctr_offset);

  if (res_fd < 0) {
    TRY2(unload_func(probe_func));
    return StatusTuple(
        -1,
        "Unable to attach %suprobe for binary %s symbol %s addr %lx "
        "offset %lx using %s\n",
        attach_type_debug(attach_type).c_str(), binary_path.c_str(),
        symbol.c_str(), symbol_addr, symbol_offset, probe_func.c_str());
  }

  open_probe_t p = {};
  p.perf_event_fd = res_fd;
  p.func = probe_func;
  uprobes_[probe_event] = std::move(p);
  return StatusTuple::OK();
}
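
// USDT attachment below reuses attach_uprobe() for each probe location. The
// probe's semaphore is either handled in-kernel via ref_ctr_offset (kernels
// ~4.20+) or bumped from userspace by Probe::enable(); on a partial failure,
// every uprobe attached so far is rolled back before returning the error.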

StatusTuple BPF::attach_usdt_without_validation(const USDT& u, pid_t pid) {
  auto& probe = *static_cast<::USDT::Probe*>(u.probe_.get());
  if (!uprobe_ref_ctr_supported() && !probe.enable(u.probe_func_))
    return StatusTuple(-1, "Unable to enable USDT %s", u.print_name().c_str());

  bool failed = false;
  std::string err_msg;
  int cnt = 0;
  for (const auto& loc : probe.locations_) {
    auto res = attach_uprobe(loc.bin_path_, std::string(), u.probe_func_,
                             loc.address_, BPF_PROBE_ENTRY, pid, 0,
                             probe.semaphore_offset());
    if (!res.ok()) {
      failed = true;
      err_msg += "USDT " + u.print_name() + " at " + loc.bin_path_ +
                 " address " + std::to_string(loc.address_);
      err_msg += ": " + res.msg() + "\n";
      break;
    }
    cnt++;
  }
  if (failed) {
    for (int i = 0; i < cnt; i++) {
      auto res = detach_uprobe(probe.locations_[i].bin_path_, std::string(),
                               probe.locations_[i].address_, BPF_PROBE_ENTRY,
                               pid);
      if (!res.ok())
        err_msg += "During clean up: " + res.msg() + "\n";
    }
    return StatusTuple(-1, err_msg);
  } else {
    return StatusTuple::OK();
  }
}

StatusTuple BPF::attach_usdt(const USDT& usdt, pid_t pid) {
  for (const auto& u : usdt_) {
    if (u == usdt) {
      return attach_usdt_without_validation(u, pid);
    }
  }

  return StatusTuple(-1, "USDT %s not found", usdt.print_name().c_str());
}

StatusTuple BPF::attach_usdt_all() {
  for (const auto& u : usdt_) {
    auto res = attach_usdt_without_validation(u, -1);
    if (!res.ok()) {
      return res;
    }
  }

  return StatusTuple::OK();
}

StatusTuple BPF::attach_tracepoint(const std::string& tracepoint,
                                   const std::string& probe_func) {
  if (tracepoints_.find(tracepoint) != tracepoints_.end())
    return StatusTuple(-1, "Tracepoint %s already attached",
                       tracepoint.c_str());

  auto pos = tracepoint.find(":");
  if ((pos == std::string::npos) || (pos != tracepoint.rfind(":")))
    return StatusTuple(-1, "Unable to parse Tracepoint %s", tracepoint.c_str());
  std::string tp_category = tracepoint.substr(0, pos);
  std::string tp_name = tracepoint.substr(pos + 1);

  int probe_fd;
  TRY2(load_func(probe_func, BPF_PROG_TYPE_TRACEPOINT, probe_fd));

  int res_fd =
      bpf_attach_tracepoint(probe_fd, tp_category.c_str(), tp_name.c_str());

  if (res_fd < 0) {
    TRY2(unload_func(probe_func));
    return StatusTuple(-1, "Unable to attach Tracepoint %s using %s",
                       tracepoint.c_str(), probe_func.c_str());
  }

  open_probe_t p = {};
  p.perf_event_fd = res_fd;
  p.func = probe_func;
  tracepoints_[tracepoint] = std::move(p);
  return StatusTuple::OK();
}
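
// Usage sketch: the tracepoint name is "category:event" with exactly one
// colon, e.g. bpf.attach_tracepoint("sched:sched_switch", "on_switch");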

StatusTuple BPF::attach_raw_tracepoint(const std::string& tracepoint,
                                       const std::string& probe_func) {
  if (raw_tracepoints_.find(tracepoint) != raw_tracepoints_.end())
    return StatusTuple(-1, "Raw tracepoint %s already attached",
                       tracepoint.c_str());

  int probe_fd;
  TRY2(load_func(probe_func, BPF_PROG_TYPE_RAW_TRACEPOINT, probe_fd));

  int res_fd = bpf_attach_raw_tracepoint(probe_fd, tracepoint.c_str());

  if (res_fd < 0) {
    TRY2(unload_func(probe_func));
    return StatusTuple(-1, "Unable to attach Raw tracepoint %s using %s",
                       tracepoint.c_str(), probe_func.c_str());
  }

  open_probe_t p = {};
  p.perf_event_fd = res_fd;
  p.func = probe_func;
  raw_tracepoints_[tracepoint] = std::move(p);
  return StatusTuple::OK();
}

StatusTuple BPF::attach_perf_event(uint32_t ev_type, uint32_t ev_config,
                                   const std::string& probe_func,
                                   uint64_t sample_period, uint64_t sample_freq,
                                   pid_t pid, int cpu, int group_fd) {
  auto ev_pair = std::make_pair(ev_type, ev_config);
  if (perf_events_.find(ev_pair) != perf_events_.end())
    return StatusTuple(-1, "Perf event type %d config %d already attached",
                       ev_type, ev_config);

  int probe_fd;
  TRY2(load_func(probe_func, BPF_PROG_TYPE_PERF_EVENT, probe_fd));

  std::vector<int> cpus;
  if (cpu >= 0)
    cpus.push_back(cpu);
  else
    cpus = get_online_cpus();
  auto fds = new std::vector<std::pair<int, int>>();
  fds->reserve(cpus.size());
  for (int i : cpus) {
    int fd = bpf_attach_perf_event(probe_fd, ev_type, ev_config, sample_period,
                                   sample_freq, pid, i, group_fd);
    if (fd < 0) {
      for (const auto& it : *fds)
        close(it.second);
      delete fds;
      TRY2(unload_func(probe_func));
      return StatusTuple(-1, "Failed to attach perf event type %d config %d",
                         ev_type, ev_config);
    }
    fds->emplace_back(i, fd);
  }

  open_probe_t p = {};
  p.func = probe_func;
  p.per_cpu_fd = fds;
  perf_events_[ev_pair] = std::move(p);
  return StatusTuple::OK();
}
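
// attach_perf_event_raw() below mirrors attach_perf_event() but takes a
// caller-built struct perf_event_attr, so any attribute the kernel accepts
// (sample_type, precise_ip, ...) can be specified; both keep one FD per CPU.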

StatusTuple BPF::attach_perf_event_raw(void* perf_event_attr,
                                       const std::string& probe_func, pid_t pid,
                                       int cpu, int group_fd,
                                       unsigned long extra_flags) {
  auto attr = static_cast<struct perf_event_attr*>(perf_event_attr);
  auto ev_pair = std::make_pair(attr->type, attr->config);
  if (perf_events_.find(ev_pair) != perf_events_.end())
    return StatusTuple(-1, "Perf event type %d config %d already attached",
                       attr->type, attr->config);

  int probe_fd;
  TRY2(load_func(probe_func, BPF_PROG_TYPE_PERF_EVENT, probe_fd));

  std::vector<int> cpus;
  if (cpu >= 0)
    cpus.push_back(cpu);
  else
    cpus = get_online_cpus();
  auto fds = new std::vector<std::pair<int, int>>();
  fds->reserve(cpus.size());
  for (int i : cpus) {
    int fd = bpf_attach_perf_event_raw(probe_fd, attr, pid, i, group_fd,
                                       extra_flags);
    if (fd < 0) {
      for (const auto& it : *fds)
        close(it.second);
      delete fds;
      TRY2(unload_func(probe_func));
      return StatusTuple(-1, "Failed to attach perf event type %d config %d",
                         attr->type, attr->config);
    }
    fds->emplace_back(i, fd);
  }

  open_probe_t p = {};
  p.func = probe_func;
  p.per_cpu_fd = fds;
  perf_events_[ev_pair] = std::move(p);
  return StatusTuple::OK();
}

StatusTuple BPF::detach_kprobe(const std::string& kernel_func,
                               bpf_probe_attach_type attach_type) {
  std::string event = get_kprobe_event(kernel_func, attach_type);

  auto it = kprobes_.find(event);
  if (it == kprobes_.end())
    return StatusTuple(-1, "No open %skprobe for %s",
                       attach_type_debug(attach_type).c_str(),
                       kernel_func.c_str());

  TRY2(detach_kprobe_event(it->first, it->second));
  kprobes_.erase(it);
  return StatusTuple::OK();
}

StatusTuple BPF::detach_uprobe(const std::string& binary_path,
                               const std::string& symbol, uint64_t symbol_addr,
                               bpf_probe_attach_type attach_type, pid_t pid,
                               uint64_t symbol_offset) {
  std::string module;
  uint64_t offset;
  TRY2(check_binary_symbol(binary_path, symbol, symbol_addr, module, offset,
                           pid, symbol_offset));

  std::string event = get_uprobe_event(module, offset, attach_type, pid);
  auto it = uprobes_.find(event);
  if (it == uprobes_.end())
    return StatusTuple(-1, "No open %suprobe for binary %s symbol %s addr %lx",
                       attach_type_debug(attach_type).c_str(),
                       binary_path.c_str(), symbol.c_str(), symbol_addr);

  TRY2(detach_uprobe_event(it->first, it->second));
  uprobes_.erase(it);
  return StatusTuple::OK();
}

StatusTuple BPF::detach_usdt_without_validation(const USDT& u, pid_t pid) {
  auto& probe = *static_cast<::USDT::Probe*>(u.probe_.get());
  bool failed = false;
  std::string err_msg;
  for (const auto& loc : probe.locations_) {
    auto res = detach_uprobe(loc.bin_path_, std::string(), loc.address_,
                             BPF_PROBE_ENTRY, pid);
    if (!res.ok()) {
      failed = true;
      err_msg += "USDT " + u.print_name() + " at " + loc.bin_path_ +
                 " address " + std::to_string(loc.address_);
      err_msg += ": " + res.msg() + "\n";
    }
  }

  if (!uprobe_ref_ctr_supported() && !probe.disable()) {
    failed = true;
    err_msg += "Unable to disable USDT " + u.print_name();
  }

  if (failed)
    return StatusTuple(-1, err_msg);
  else
    return StatusTuple::OK();
}

StatusTuple BPF::detach_usdt(const USDT& usdt, pid_t pid) {
  for (const auto& u : usdt_) {
    if (u == usdt) {
      return detach_usdt_without_validation(u, pid);
    }
  }

  return StatusTuple(-1, "USDT %s not found", usdt.print_name().c_str());
}

StatusTuple BPF::detach_usdt_all() {
  for (const auto& u : usdt_) {
    auto ret = detach_usdt_without_validation(u, -1);
    if (!ret.ok()) {
      return ret;
    }
  }

  return StatusTuple::OK();
}

StatusTuple BPF::detach_tracepoint(const std::string& tracepoint) {
  auto it = tracepoints_.find(tracepoint);
  if (it == tracepoints_.end())
    return StatusTuple(-1, "No open Tracepoint %s", tracepoint.c_str());

  TRY2(detach_tracepoint_event(it->first, it->second));
  tracepoints_.erase(it);
  return StatusTuple::OK();
}

StatusTuple BPF::detach_raw_tracepoint(const std::string& tracepoint) {
  auto it = raw_tracepoints_.find(tracepoint);
  if (it == raw_tracepoints_.end())
    return StatusTuple(-1, "No open Raw tracepoint %s", tracepoint.c_str());

  TRY2(detach_raw_tracepoint_event(it->first, it->second));
  raw_tracepoints_.erase(it);
  return StatusTuple::OK();
}

StatusTuple BPF::detach_perf_event(uint32_t ev_type, uint32_t ev_config) {
  auto it = perf_events_.find(std::make_pair(ev_type, ev_config));
  if (it == perf_events_.end())
    return StatusTuple(-1, "Perf Event type %d config %d not attached", ev_type,
                       ev_config);

  TRY2(detach_perf_event_all_cpu(it->second));
  perf_events_.erase(it);
  return StatusTuple::OK();
}

StatusTuple BPF::detach_perf_event_raw(void* perf_event_attr) {
  auto attr = static_cast<struct perf_event_attr*>(perf_event_attr);
  return detach_perf_event(attr->type, attr->config);
}

StatusTuple BPF::open_perf_event(const std::string& name, uint32_t type,
                                 uint64_t config, int pid) {
  if (perf_event_arrays_.find(name) == perf_event_arrays_.end()) {
    TableStorage::iterator it;
    if (!bpf_module_->table_storage().Find(Path({bpf_module_->id(), name}), it))
      return StatusTuple(-1, "open_perf_event: unable to find table_storage %s",
                         name.c_str());
    perf_event_arrays_[name] = new BPFPerfEventArray(it->second);
  }
  auto table = perf_event_arrays_[name];
  TRY2(table->open_all_cpu(type, config, pid));
  return StatusTuple::OK();
}

StatusTuple BPF::close_perf_event(const std::string& name) {
  auto it = perf_event_arrays_.find(name);
  if (it == perf_event_arrays_.end())
    return StatusTuple(-1, "Perf Event for %s not open", name.c_str());
  TRY2(it->second->close_all_cpu());
  return StatusTuple::OK();
}

StatusTuple BPF::open_perf_buffer(const std::string& name,
                                  perf_reader_raw_cb cb,
                                  perf_reader_lost_cb lost_cb, void* cb_cookie,
                                  int page_cnt) {
  if (perf_buffers_.find(name) == perf_buffers_.end()) {
    TableStorage::iterator it;
    if (!bpf_module_->table_storage().Find(Path({bpf_module_->id(), name}), it))
      return StatusTuple(-1,
                         "open_perf_buffer: unable to find table_storage %s",
                         name.c_str());
    perf_buffers_[name] = new BPFPerfBuffer(it->second);
  }
  if ((page_cnt & (page_cnt - 1)) != 0)
    return StatusTuple(-1, "open_perf_buffer page_cnt must be a power of two");
  auto table = perf_buffers_[name];
  TRY2(table->open_all_cpu(cb, lost_cb, cb_cookie, page_cnt));
  return StatusTuple::OK();
}
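
// Usage sketch (callback typedefs come from perf_reader.h; default arguments
// for lost_cb/cb_cookie/page_cnt are declared in BPF.h, not shown here):
//   void on_event(void* cb_cookie, void* data, int size) { /* decode data */ }
//   bpf.open_perf_buffer("events", &on_event);
//   while (running) bpf.poll_perf_buffer("events", /*timeout_ms=*/100);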

StatusTuple BPF::close_perf_buffer(const std::string& name) {
  auto it = perf_buffers_.find(name);
  if (it == perf_buffers_.end())
    return StatusTuple(-1, "Perf buffer for %s not open", name.c_str());
  TRY2(it->second->close_all_cpu());
  return StatusTuple::OK();
}

BPFPerfBuffer* BPF::get_perf_buffer(const std::string& name) {
  auto it = perf_buffers_.find(name);
  return (it == perf_buffers_.end()) ? nullptr : it->second;
}

int BPF::poll_perf_buffer(const std::string& name, int timeout_ms) {
  auto it = perf_buffers_.find(name);
  if (it == perf_buffers_.end())
    return -1;
  return it->second->poll(timeout_ms);
}

size_t BPF::get_num_functions() { return bpf_module_->num_functions(); }

const char* BPF::get_function_name(size_t id) {
  if (!bpf_module_) return nullptr;
  return bpf_module_->function_name(id);
}

StatusTuple BPF::load_func(const std::string& func_name, bpf_prog_type type,
                           int& fd, unsigned flags,
                           bpf_attach_type expected_attach_type) {
  if (funcs_.find(func_name) != funcs_.end()) {
    fd = funcs_[func_name];
    return StatusTuple::OK();
  }

  uint8_t* func_start = bpf_module_->function_start(func_name);
  if (!func_start)
    return StatusTuple(-1, "Can't find start of function %s",
                       func_name.c_str());
  size_t func_size = bpf_module_->function_size(func_name);

  int log_level = 0;
  if (flag_ & DEBUG_BPF_REGISTER_STATE)
    log_level = 2;
  else if (flag_ & DEBUG_BPF)
    log_level = 1;

  fd = bpf_module_->bcc_func_load(type, func_name.c_str(),
                                  reinterpret_cast<struct bpf_insn*>(func_start),
                                  func_size, bpf_module_->license(),
                                  bpf_module_->kern_version(), log_level,
                                  nullptr, 0, nullptr, flags,
                                  expected_attach_type);

  if (fd < 0)
    return StatusTuple(-1, "Failed to load %s: %d", func_name.c_str(), fd);

  int ret = bpf_module_->annotate_prog_tag(
      func_name, fd, reinterpret_cast<struct bpf_insn*>(func_start), func_size);
  if (ret < 0)
    fprintf(stderr, "WARNING: cannot get prog tag, ignore saving source with program tag\n");

  funcs_[func_name] = fd;
  return StatusTuple::OK();
}
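
// load_func() caches one program FD per function name in funcs_, so repeated
// attaches of the same function reuse the FD; unload_func() below closes and
// evicts that cached FD, keeping attach/detach pairs balanced.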

StatusTuple BPF::unload_func(const std::string& func_name) {
  auto it = funcs_.find(func_name);
  if (it == funcs_.end())
    return StatusTuple::OK();

  int res = close(it->second);
  if (res != 0)
    return StatusTuple(-1, "Can't close FD for %s: %d", it->first.c_str(), res);

  funcs_.erase(it);
  return StatusTuple::OK();
}

StatusTuple BPF::attach_func(int prog_fd, int attachable_fd,
                             enum bpf_attach_type attach_type,
                             uint64_t flags) {
  int res = bpf_module_->bcc_func_attach(prog_fd, attachable_fd, attach_type,
                                         flags);
  if (res != 0)
    return StatusTuple(-1, "Can't attach for prog_fd %d, attachable_fd %d, "
                           "attach_type %d, flags %ld: error %d",
                       prog_fd, attachable_fd, attach_type, flags, res);

  return StatusTuple::OK();
}

StatusTuple BPF::detach_func(int prog_fd, int attachable_fd,
                             enum bpf_attach_type attach_type) {
  int res = bpf_module_->bcc_func_detach(prog_fd, attachable_fd, attach_type);
  if (res != 0)
    return StatusTuple(-1, "Can't detach for prog_fd %d, attachable_fd %d, "
                           "attach_type %d: error %d",
                       prog_fd, attachable_fd, attach_type, res);

  return StatusTuple::OK();
}

std::string BPF::get_syscall_fnname(const std::string& name) {
  if (syscall_prefix_ == nullptr) {
    KSyms ksym;
    uint64_t addr;

    if (ksym.resolve_name(nullptr, "sys_bpf", &addr))
      syscall_prefix_.reset(new std::string("sys_"));
    else if (ksym.resolve_name(nullptr, "__x64_sys_bpf", &addr))
      syscall_prefix_.reset(new std::string("__x64_sys_"));
    else
      syscall_prefix_.reset(new std::string());
  }

  return *syscall_prefix_ + name;
}
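
// Usage sketch: the syscall prefix is probed once and cached, so
//   bpf.attach_kprobe(bpf.get_syscall_fnname("clone"), "on_clone");
// resolves to "sys_clone" or "__x64_sys_clone" depending on the kernel.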

StatusTuple BPF::check_binary_symbol(const std::string& binary_path,
                                     const std::string& symbol,
                                     uint64_t symbol_addr,
                                     std::string& module_res,
                                     uint64_t& offset_res, pid_t pid,
                                     uint64_t symbol_offset) {
  bcc_symbol output;
  int res = bcc_resolve_symname(binary_path.c_str(), symbol.c_str(),
                                symbol_addr, pid, nullptr, &output);
  if (res < 0)
    return StatusTuple(
        -1, "Unable to find offset for binary %s symbol %s address %lx",
        binary_path.c_str(), symbol.c_str(), symbol_addr);

  if (output.module) {
    module_res = output.module;
    ::free(const_cast<char*>(output.module));
  } else {
    module_res = binary_path;
  }
  offset_res = output.offset + symbol_offset;
  return StatusTuple::OK();
}

std::string BPF::get_kprobe_event(const std::string& kernel_func,
                                  bpf_probe_attach_type type) {
  std::string res = attach_type_prefix(type) + "_";
  res += sanitize_str(kernel_func, &BPF::kprobe_event_validator);
  return res;
}

BPFProgTable BPF::get_prog_table(const std::string& name) {
  TableStorage::iterator it;
  if (bpf_module_->table_storage().Find(Path({bpf_module_->id(), name}), it))
    return BPFProgTable(it->second);
  return BPFProgTable({});
}

BPFCgroupArray BPF::get_cgroup_array(const std::string& name) {
  TableStorage::iterator it;
  if (bpf_module_->table_storage().Find(Path({bpf_module_->id(), name}), it))
    return BPFCgroupArray(it->second);
  return BPFCgroupArray({});
}

BPFDevmapTable BPF::get_devmap_table(const std::string& name) {
  TableStorage::iterator it;
  if (bpf_module_->table_storage().Find(Path({bpf_module_->id(), name}), it))
    return BPFDevmapTable(it->second);
  return BPFDevmapTable({});
}

BPFXskmapTable BPF::get_xskmap_table(const std::string& name) {
  TableStorage::iterator it;
  if (bpf_module_->table_storage().Find(Path({bpf_module_->id(), name}), it))
    return BPFXskmapTable(it->second);
  return BPFXskmapTable({});
}

BPFStackTable BPF::get_stack_table(const std::string& name, bool use_debug_file,
                                   bool check_debug_file_crc) {
  TableStorage::iterator it;
  if (bpf_module_->table_storage().Find(Path({bpf_module_->id(), name}), it))
    return BPFStackTable(it->second, use_debug_file, check_debug_file_crc);
  return BPFStackTable({}, use_debug_file, check_debug_file_crc);
}

BPFStackBuildIdTable BPF::get_stackbuildid_table(const std::string &name,
                                                 bool use_debug_file,
                                                 bool check_debug_file_crc) {
  TableStorage::iterator it;
  if (bpf_module_->table_storage().Find(Path({bpf_module_->id(), name}), it))
    return BPFStackBuildIdTable(it->second, use_debug_file,
                                check_debug_file_crc, get_bsymcache());
  return BPFStackBuildIdTable({}, use_debug_file, check_debug_file_crc,
                              get_bsymcache());
}

BPFSockmapTable BPF::get_sockmap_table(const std::string& name) {
  TableStorage::iterator it;
  if (bpf_module_->table_storage().Find(Path({bpf_module_->id(), name}), it))
    return BPFSockmapTable(it->second);
  return BPFSockmapTable({});
}

BPFSockhashTable BPF::get_sockhash_table(const std::string& name) {
  TableStorage::iterator it;
  if (bpf_module_->table_storage().Find(Path({bpf_module_->id(), name}), it))
    return BPFSockhashTable(it->second);
  return BPFSockhashTable({});
}

bool BPF::add_module(std::string module) {
  return bcc_buildsymcache_add_module(get_bsymcache(), module.c_str()) != 0
             ? false
             : true;
}

namespace {

constexpr size_t kEventNameSizeLimit = 224;

std::string shorten_event_name(const std::string& name) {
  std::string hash = uint_to_hex(std::hash<std::string>{}(name));
  return name.substr(0, kEventNameSizeLimit - hash.size()) + hash;
}

}  // namespace
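
// Event names longer than kEventNameSizeLimit are truncated and suffixed with
// a hash of the full name, so distinct probes still get distinct event names.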

std::string BPF::get_uprobe_event(const std::string& binary_path,
                                  uint64_t offset, bpf_probe_attach_type type,
                                  pid_t pid) {
  std::string res = attach_type_prefix(type) + "_";
  res += sanitize_str(binary_path, &BPF::uprobe_path_validator);
  res += "_0x" + uint_to_hex(offset);
  if (pid != -1)
    res += "_" + std::to_string(pid);
  if (res.size() > kEventNameSizeLimit) {
    return shorten_event_name(res);
  }
  return res;
}

StatusTuple BPF::detach_kprobe_event(const std::string& event,
                                     open_probe_t& attr) {
  bpf_close_perf_event_fd(attr.perf_event_fd);
  TRY2(unload_func(attr.func));
  if (bpf_detach_kprobe(event.c_str()) < 0)
    return StatusTuple(-1, "Unable to detach kprobe %s", event.c_str());
  return StatusTuple::OK();
}

StatusTuple BPF::detach_uprobe_event(const std::string& event,
                                     open_probe_t& attr) {
  bpf_close_perf_event_fd(attr.perf_event_fd);
  TRY2(unload_func(attr.func));
  if (bpf_detach_uprobe(event.c_str()) < 0)
    return StatusTuple(-1, "Unable to detach uprobe %s", event.c_str());
  return StatusTuple::OK();
}

StatusTuple BPF::detach_tracepoint_event(const std::string& tracepoint,
                                         open_probe_t& attr) {
  bpf_close_perf_event_fd(attr.perf_event_fd);
  TRY2(unload_func(attr.func));

  // TODO: bpf_detach_tracepoint currently does nothing.
  return StatusTuple::OK();
}

StatusTuple BPF::detach_raw_tracepoint_event(const std::string& tracepoint,
                                             open_probe_t& attr) {
  TRY2(close(attr.perf_event_fd));
  TRY2(unload_func(attr.func));

  return StatusTuple::OK();
}

StatusTuple BPF::detach_perf_event_all_cpu(open_probe_t& attr) {
  bool has_error = false;
  std::string err_msg;
  for (const auto& it : *attr.per_cpu_fd) {
    int res = bpf_close_perf_event_fd(it.second);
    if (res != 0) {
      has_error = true;
      err_msg += "Failed to close perf event FD " + std::to_string(it.second) +
                 " For CPU " + std::to_string(it.first) + ": ";
      err_msg += std::string(std::strerror(errno)) + "\n";
    }
  }
  delete attr.per_cpu_fd;
  TRY2(unload_func(attr.func));

  if (has_error)
    return StatusTuple(-1, err_msg);
  return StatusTuple::OK();
}

int BPF::free_bcc_memory() {
  return bcc_free_memory();
}
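
// The USDT helper class below records which probe to instrument. USDT::init()
// resolves the probe through ::USDT::Context, takes ownership of it, and
// generates the argument-reading program text that BPF::init_usdt() appends to
// the module source before compilation.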

USDT::USDT(const std::string& binary_path, const std::string& provider,
           const std::string& name, const std::string& probe_func)
    : initialized_(false),
      binary_path_(binary_path),
      pid_(-1),
      provider_(provider),
      name_(name),
      probe_func_(probe_func),
      mod_match_inode_only_(1) {}

USDT::USDT(pid_t pid, const std::string& provider, const std::string& name,
           const std::string& probe_func)
    : initialized_(false),
      binary_path_(),
      pid_(pid),
      provider_(provider),
      name_(name),
      probe_func_(probe_func),
      mod_match_inode_only_(1) {}

USDT::USDT(const std::string& binary_path, pid_t pid,
           const std::string& provider, const std::string& name,
           const std::string& probe_func)
    : initialized_(false),
      binary_path_(binary_path),
      pid_(pid),
      provider_(provider),
      name_(name),
      probe_func_(probe_func),
      mod_match_inode_only_(1) {}

USDT::USDT(const USDT& usdt)
    : initialized_(false),
      binary_path_(usdt.binary_path_),
      pid_(usdt.pid_),
      provider_(usdt.provider_),
      name_(usdt.name_),
      probe_func_(usdt.probe_func_),
      mod_match_inode_only_(usdt.mod_match_inode_only_) {}

USDT::USDT(USDT&& usdt) noexcept
    : initialized_(usdt.initialized_),
      binary_path_(std::move(usdt.binary_path_)),
      pid_(usdt.pid_),
      provider_(std::move(usdt.provider_)),
      name_(std::move(usdt.name_)),
      probe_func_(std::move(usdt.probe_func_)),
      probe_(std::move(usdt.probe_)),
      program_text_(std::move(usdt.program_text_)),
      mod_match_inode_only_(usdt.mod_match_inode_only_) {
  usdt.initialized_ = false;
}

bool USDT::operator==(const USDT& other) const {
  return (provider_ == other.provider_) && (name_ == other.name_) &&
         (binary_path_ == other.binary_path_) && (pid_ == other.pid_) &&
         (probe_func_ == other.probe_func_);
}

int USDT::set_probe_matching_kludge(uint8_t kludge) {
  if (kludge != 0 && kludge != 1)
    return -1;
  mod_match_inode_only_ = kludge;
  return 0;
}

StatusTuple USDT::init() {
  std::unique_ptr<::USDT::Context> ctx;
  if (!binary_path_.empty() && pid_ > 0)
    ctx.reset(new ::USDT::Context(pid_, binary_path_, mod_match_inode_only_));
  else if (!binary_path_.empty())
    ctx.reset(new ::USDT::Context(binary_path_, mod_match_inode_only_));
  else if (pid_ > 0)
    ctx.reset(new ::USDT::Context(pid_, mod_match_inode_only_));
  else
    return StatusTuple(-1, "No valid Binary Path or PID provided");

  if (!ctx->loaded())
    return StatusTuple(-1, "Unable to load USDT " + print_name());

  auto deleter = [](void* probe) { delete static_cast<::USDT::Probe*>(probe); };
  for (auto& p : ctx->probes_) {
    if (p->provider_ == provider_ && p->name_ == name_) {
      // Take ownership of the probe that we are interested in, and avoid it
      // being destructed when we destruct the USDT::Context instance
      probe_ = std::unique_ptr<void, std::function<void(void*)>>(p.release(),
                                                                 deleter);
      p.swap(ctx->probes_.back());
      ctx->probes_.pop_back();
      break;
    }
  }
  if (!probe_)
    return StatusTuple(-1, "Unable to find USDT " + print_name());
  ctx.reset(nullptr);
  auto& probe = *static_cast<::USDT::Probe*>(probe_.get());

  std::ostringstream stream;
  if (!probe.usdt_getarg(stream, probe_func_))
    return StatusTuple(
        -1, "Unable to generate program text for USDT " + print_name());
  program_text_ = ::USDT::USDT_PROGRAM_HEADER + stream.str();

  initialized_ = true;
  return StatusTuple::OK();
}

}  // namespace ebpf
  907. } // namespace ebpf