res-lock v4 #8679

Closed · wants to merge 121 commits
Changes from 5 commits

Commits
fc3ab17
Merge branch 'selftests-bpf-implement-setting-global-variables-in-ver…
anakryiko Feb 26, 2025
bacac21
bpf/helpers: Refactor bpf_dynptr_read and bpf_dynptr_write
mykyta5 Feb 26, 2025
9d15404
bpf/helpers: Introduce bpf_dynptr_copy kfunc
mykyta5 Feb 26, 2025
8fc1834
selftests/bpf: Add tests for bpf_dynptr_copy
mykyta5 Feb 26, 2025
43d9d43
Merge branch 'introduce-bpf_dynptr_copy-kfunc'
anakryiko Feb 26, 2025
27e3162
selftests/bpf: Allow auto port binding for cgroup connect
mrpre Feb 27, 2025
dbe7d46
selftests/bpf: Allow auto port binding for bpf nf
mrpre Feb 27, 2025
09de329
selftests/bpf: Fixes for test_maps test
mrpre Feb 27, 2025
0ffa016
Merge branch 'optimize-bpf-selftest-to-increase-ci-success-rate'
Feb 27, 2025
78a8a85
bpf: Allow pre-ordering for bpf cgroup progs
Feb 24, 2025
42c5e6d
selftests/bpf: Add selftests allowing cgroup prog pre-ordering
Feb 24, 2025
0aaddfb
locking/local_lock: Introduce localtry_lock_t
Feb 22, 2025
97769a5
mm, bpf: Introduce try_alloc_pages() for opportunistic page allocation
Feb 22, 2025
8c57b68
mm, bpf: Introduce free_pages_nolock()
Feb 22, 2025
01d3722
memcg: Use trylock to access memcg stock_lock.
Feb 22, 2025
e8d78db
mm, bpf: Use memcg in try_alloc_pages().
Feb 22, 2025
c9eb810
bpf: Use try_alloc_pages() to allocate pages for bpf needs.
Feb 22, 2025
93ed6fc
Merge branch 'bpf-mm-introduce-try_alloc_pages'
Feb 27, 2025
e4d68c0
Merge branch 'bpf-next/try_alloc_pages' into bpf-next/master
Feb 27, 2025
0b93631
bpf/selftests: test_select_reuseport_kern: Remove unused header
Tropicao Feb 27, 2025
90d8c89
bpf: Summarize sleepable global subprogs
kkdwivedi Mar 1, 2025
6e2cc60
selftests/bpf: Test sleepable global subprogs in atomic contexts
kkdwivedi Mar 1, 2025
ce9add7
selftests/bpf: Add tests for extending sleepable global subprogs
kkdwivedi Mar 1, 2025
53415f6
Merge branch 'global-subprogs-in-rcu-preempt-irq-disabled-sections'
Mar 2, 2025
17a82ed
bpf: no longer acquire map_idr_lock in bpf_map_inc_not_zero()
Mar 1, 2025
b2d9ef7
bpf: Factor out atomic_ptr_type_ok()
peilin-ye Mar 3, 2025
d430c46
bpf: Factor out check_atomic_rmw()
peilin-ye Mar 3, 2025
d38ad24
bpf: Factor out check_load_mem() and check_store_reg()
peilin-ye Mar 3, 2025
128cd76
veristat: @files-list.txt notation for object files list
eddyz87 Mar 1, 2025
1649753
veristat: Strerror expects positive number (errno)
eddyz87 Mar 1, 2025
b127528
veristat: Report program type guess results to stderr
eddyz87 Mar 1, 2025
0bf6c8a
Merge branch 'veristat-files-list-txt-notation-for-object-files-list'
anakryiko Mar 3, 2025
6829f3c
selftests/bpf: test_tunnel: Add generic_attach* helpers
bastien-curutchet Mar 3, 2025
7289e59
selftests/bpf: test_tunnel: Add ping helpers
bastien-curutchet Mar 3, 2025
08d20ea
selftests/bpf: test_tunnel: Move gre tunnel test to test_progs
bastien-curutchet Mar 3, 2025
1ea01a8
selftests/bpf: test_tunnel: Move ip6gre tunnel test to test_progs
bastien-curutchet Mar 3, 2025
0ecd1e9
selftests/bpf: test_tunnel: Move erspan tunnel tests to test_progs
bastien-curutchet Mar 3, 2025
cae41f7
selftests/bpf: test_tunnel: Move ip6erspan tunnel test to test_progs
bastien-curutchet Mar 3, 2025
d89542d
selftests/bpf: test_tunnel: Move geneve tunnel test to test_progs
bastien-curutchet Mar 3, 2025
8d86094
selftests/bpf: test_tunnel: Move ip6geneve tunnel test to test_progs
bastien-curutchet Mar 3, 2025
680a752
selftests/bpf: test_tunnel: Move ip6tnl tunnel tests to test_progs
bastien-curutchet Mar 3, 2025
c8d6d78
selftests/bpf: test_tunnel: Remove test_tunnel.sh
bastien-curutchet Mar 3, 2025
a36a835
Merge branch 'selftests-bpf-migrate-test_tunnel-sh-to-test_progs'
Mar 3, 2025
122f1fd
net: filter: Avoid shadowing variable in bpf_convert_ctx_access()
leitao Feb 28, 2025
7218ff1
libbpf: Use map_is_created helper in map setters
mykyta5 Mar 3, 2025
8ca8f6d
libbpf: Introduce more granular state for bpf_object
mykyta5 Mar 3, 2025
da75554
libbpf: Split bpf object load into prepare/load
mykyta5 Mar 3, 2025
68b61a8
selftests/bpf: Add tests for bpf_object__prepare
mykyta5 Mar 3, 2025
7586e21
Merge branch 'introduce-bpf_object__prepare'
anakryiko Mar 3, 2025
13a664f
bpf: Add verifier support for timed may_goto
kkdwivedi Mar 4, 2025
2cb0a52
bpf, x86: Add x86 JIT support for timed may_goto
kkdwivedi Mar 4, 2025
ad55432
Merge branch 'timed-may_goto'
Mar 4, 2025
e24bbad
bpf: Introduce load-acquire and store-release instructions
peilin-ye Mar 4, 2025
4170a60
arm64: insn: Add BIT(23) to {load,store}_ex's mask
peilin-ye Mar 4, 2025
248b190
arm64: insn: Add load-acquire and store-release instructions
peilin-ye Mar 4, 2025
1bfe7f6
bpf, arm64: Support load-acquire and store-release instructions
peilin-ye Mar 4, 2025
14c0427
bpf, x86: Support load-acquire and store-release instructions
peilin-ye Mar 4, 2025
953df09
selftests/bpf: Add selftests for load-acquire and store-release instr…
peilin-ye Mar 4, 2025
c6287f1
Merge branch 'introduce-load-acquire-and-store-release-bpf-instructions'
Mar 4, 2025
1c15257
bpf: jmp_offset() and verbose_insn() utility functions
eddyz87 Mar 4, 2025
0ae958e
bpf: get_call_summary() utility function
eddyz87 Mar 4, 2025
7dad036
bpf: simple DFA-based live registers analysis
eddyz87 Mar 4, 2025
994a876
bpf: use register liveness information for func_states_equal
eddyz87 Mar 4, 2025
8a3fc22
selftests/bpf: test cases for compute_live_registers()
eddyz87 Mar 4, 2025
42ba8a4
Merge branch 'bpf-simple-dfa-based-live-registers-analysis'
Mar 4, 2025
aae1add
bpf: correct use/def for may_goto instruction
eddyz87 Mar 5, 2025
7781fd0
bpf, docs: Fix broken link to renamed bpf_iter_task_vmas.c
Mar 4, 2025
994fd3e
selftests/bpf: Introduce cond_break_label
kkdwivedi Mar 6, 2025
0201027
selftests/bpf: Introduce arena spin lock
kkdwivedi Mar 6, 2025
313149f
selftests/bpf: Add tests for arena spin lock
kkdwivedi Mar 6, 2025
48b3be8
Merge branch 'arena-spin-lock'
Mar 6, 2025
88b1c42
selftests/bpf: Move test_lwt_ip_encap to test_progs
bastien-curutchet Mar 4, 2025
5cb4077
selftests/bpf: Clean up call sites of stdio_restore()
ameryhung Mar 5, 2025
6d54a02
selftests/bpf: Allow assigning traffic monitor print function
ameryhung Mar 5, 2025
15bfc10
selftests/bpf: Fix dangling stdout seen by traffic monitor thread
ameryhung Mar 5, 2025
7e437dc
selftests/bpf: Fix cap_enable_effective() return code
kknjh Mar 5, 2025
359d070
selftests/bpf: lwt_seg6local: Remove unused routes
bastien-curutchet Mar 7, 2025
3fb97a2
selftests/bpf: lwt_seg6local: Move test to test_progs
bastien-curutchet Mar 7, 2025
f282146
Merge branch 'selftests-bpf-move-test_lwt_seg6local-to-test_progs'
Mar 8, 2025
63f99cd
bpf: add kfunc for populating cpumask bits
etsal Mar 9, 2025
3524b15
selftests: bpf: add bpf_cpumask_populate selftests
etsal Mar 9, 2025
d70870e
bpf: fix missing kdoc string fields in cpumask.c
etsal Mar 9, 2025
93ececb
selftests: bpf: fix duplicate selftests in cpumask_success.
etsal Mar 9, 2025
79d93c8
Merge branch 'bpf-introduce-helper-for-populating-bpf_cpumask'
Mar 10, 2025
74f36a9
selftests/bpf: Fix selection of static vs. dynamic LLVM
aspsk Mar 10, 2025
26350a2
mm: Fix the flipped condition in gfpflags_allow_spinning()
tehcaster Mar 10, 2025
bf5af29
selftests/bpf: Convert comma to semicolon
Mar 10, 2025
a8cd035
security: Propagate caller information in bpf hooks
Mar 10, 2025
f563314
selftests/bpf: Add a kernel flag test for LSM bpf hook
Mar 10, 2025
a68894a
Merge branch 'security-propagate-caller-information-in-bpf-hooks'
Mar 11, 2025
a74a2a3
bpf: bpftool: Setting error code in do_loader()
nswon Mar 11, 2025
be741c7
bpf: preload: Add MODULE_DESCRIPTION
arndb Mar 10, 2025
46d38f4
selftests/bpf: Fix arena_spin_lock compilation on PowerPC
kkdwivedi Mar 11, 2025
956e816
selftests/bpf: Fix string read in strncmp benchmark
viktormalik Mar 13, 2025
2d7597d
selftests/bpf: Fix sockopt selftest failure on powerpc
Mar 11, 2025
dc8d1d6
adding ci files
Mar 13, 2025
95ec1c0
locking: Move MCS struct definition to public header
kkdwivedi Aug 27, 2024
24632c0
locking: Move common qspinlock helpers to a private header
kkdwivedi Aug 15, 2024
3f43c73
locking: Allow obtaining result of arch_mcs_spin_lock_contended
kkdwivedi Aug 15, 2024
7267c90
locking: Copy out qspinlock.c to kernel/bpf/rqspinlock.c
kkdwivedi Aug 15, 2024
b0d6223
rqspinlock: Add rqspinlock.h header
kkdwivedi Aug 27, 2024
fdbaf16
rqspinlock: Drop PV and virtualization support
kkdwivedi Oct 10, 2024
535709e
rqspinlock: Add support for timeouts
kkdwivedi Aug 15, 2024
a7424dd
rqspinlock: Hardcode cond_acquire loops for arm64
kkdwivedi Feb 3, 2025
e6cd61e
rqspinlock: Protect pending bit owners from stalls
kkdwivedi Aug 15, 2024
2aa3ccf
rqspinlock: Protect waiters in queue from stalls
kkdwivedi Aug 15, 2024
76240ff
rqspinlock: Protect waiters in trylock fallback from stalls
kkdwivedi Aug 15, 2024
9b4cb3a
rqspinlock: Add deadlock detection and recovery
kkdwivedi Nov 19, 2024
afde85d
rqspinlock: Add a test-and-set fallback
kkdwivedi Feb 4, 2025
9007a0d
rqspinlock: Add basic support for CONFIG_PARAVIRT
kkdwivedi Oct 16, 2024
13f5d29
rqspinlock: Add helper to print a splat on timeout or deadlock
kkdwivedi Oct 16, 2024
2d32d87
rqspinlock: Add macros for rqspinlock usage
kkdwivedi Nov 19, 2024
67c16a2
rqspinlock: Add entry to Makefile, MAINTAINERS
kkdwivedi Aug 27, 2024
5a3fc5e
rqspinlock: Add locktorture support
kkdwivedi Nov 20, 2024
283903b
bpf: Convert hashtab.c to rqspinlock
kkdwivedi Nov 19, 2024
6212aeb
bpf: Convert percpu_freelist.c to rqspinlock
kkdwivedi Nov 20, 2024
71acbb0
bpf: Convert lpm_trie.c to rqspinlock
kkdwivedi Nov 20, 2024
333c4ff
bpf: Introduce rqspinlock kfuncs
kkdwivedi Aug 15, 2024
e93b739
bpf: Implement verifier support for rqspinlock
kkdwivedi Dec 13, 2024
6f50023
bpf: Maintain FIFO property for rqspinlock unlock
kkdwivedi Jan 27, 2025
dfb942c
selftests/bpf: Add tests for rqspinlock
kkdwivedi Jul 30, 2024
199 changes: 132 additions & 67 deletions tools/lib/bpf/libbpf.c
@@ -670,11 +670,18 @@ struct elf_state {

struct usdt_manager;

enum bpf_object_state {
OBJ_OPEN,
OBJ_PREPARED,
OBJ_LOADED,
};

struct bpf_object {
char name[BPF_OBJ_NAME_LEN];
char license[64];
__u32 kern_version;

enum bpf_object_state state;
struct bpf_program *programs;
size_t nr_programs;
struct bpf_map *maps;
@@ -686,7 +693,6 @@ struct bpf_object {
int nr_extern;
int kconfig_map_idx;

bool loaded;
bool has_subcalls;
bool has_rodata;

@@ -1511,7 +1517,7 @@ static struct bpf_object *bpf_object__new(const char *path,
obj->kconfig_map_idx = -1;

obj->kern_version = get_kernel_version();
obj->loaded = false;
obj->state = OBJ_OPEN;

return obj;
}
@@ -4845,14 +4851,19 @@ static int bpf_get_map_info_from_fdinfo(int fd, struct bpf_map_info *info)
return 0;
}

static bool map_is_created(const struct bpf_map *map)
{
return map->obj->state >= OBJ_PREPARED || map->reused;
}

bool bpf_map__autocreate(const struct bpf_map *map)
{
return map->autocreate;
}

int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate)
{
if (map->obj->loaded)
if (map_is_created(map))
return libbpf_err(-EBUSY);

map->autocreate = autocreate;
@@ -4946,7 +4957,7 @@ struct bpf_map *bpf_map__inner_map(struct bpf_map *map)

int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries)
{
if (map->obj->loaded)
if (map_is_created(map))
return libbpf_err(-EBUSY);

map->def.max_entries = max_entries;
@@ -5191,11 +5202,6 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)

static void bpf_map__destroy(struct bpf_map *map);

static bool map_is_created(const struct bpf_map *map)
{
return map->obj->loaded || map->reused;
}

static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner)
{
LIBBPF_OPTS(bpf_map_create_opts, create_attr);
@@ -7895,13 +7901,6 @@ bpf_object__load_progs(struct bpf_object *obj, int log_level)
size_t i;
int err;

for (i = 0; i < obj->nr_programs; i++) {
prog = &obj->programs[i];
err = bpf_object__sanitize_prog(obj, prog);
if (err)
return err;
}

for (i = 0; i < obj->nr_programs; i++) {
prog = &obj->programs[i];
if (prog_is_subprog(obj, prog))
@@ -7927,6 +7926,21 @@ bpf_object__load_progs(struct bpf_object *obj, int log_level)
return 0;
}

static int bpf_object_prepare_progs(struct bpf_object *obj)
{
struct bpf_program *prog;
size_t i;
int err;

for (i = 0; i < obj->nr_programs; i++) {
prog = &obj->programs[i];
err = bpf_object__sanitize_prog(obj, prog);
if (err)
return err;
}
return 0;
}

static const struct bpf_sec_def *find_sec_def(const char *sec_name);

static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object_open_opts *opts)
@@ -8543,14 +8557,77 @@ static int bpf_object_prepare_struct_ops(struct bpf_object *obj)
return 0;
}

static void bpf_object_unpin(struct bpf_object *obj)
{
int i;

/* unpin any maps that were auto-pinned during load */
for (i = 0; i < obj->nr_maps; i++)
if (obj->maps[i].pinned && !obj->maps[i].reused)
bpf_map__unpin(&obj->maps[i], NULL);
}

static void bpf_object_post_load_cleanup(struct bpf_object *obj)
{
int i;

/* clean up fd_array */
zfree(&obj->fd_array);

/* clean up module BTFs */
for (i = 0; i < obj->btf_module_cnt; i++) {
close(obj->btf_modules[i].fd);
btf__free(obj->btf_modules[i].btf);
free(obj->btf_modules[i].name);
}
obj->btf_module_cnt = 0;
zfree(&obj->btf_modules);

/* clean up vmlinux BTF */
btf__free(obj->btf_vmlinux);
obj->btf_vmlinux = NULL;
}

static int bpf_object_prepare(struct bpf_object *obj, const char *target_btf_path)
{
int err;

if (obj->state >= OBJ_PREPARED) {
pr_warn("object '%s': prepare loading can't be attempted twice\n", obj->name);
return -EINVAL;
}

err = bpf_object_prepare_token(obj);
err = err ? : bpf_object__probe_loading(obj);
err = err ? : bpf_object__load_vmlinux_btf(obj, false);
err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
err = err ? : bpf_object__sanitize_maps(obj);
err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
err = err ? : bpf_object_adjust_struct_ops_autoload(obj);
err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path);
err = err ? : bpf_object__sanitize_and_load_btf(obj);
err = err ? : bpf_object__create_maps(obj);
err = err ? : bpf_object_prepare_progs(obj);

if (err) {
bpf_object_unpin(obj);
bpf_object_unload(obj);
obj->state = OBJ_LOADED;
return err;
}

obj->state = OBJ_PREPARED;
return 0;
}

static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const char *target_btf_path)
{
int err, i;
int err;

if (!obj)
return libbpf_err(-EINVAL);

if (obj->loaded) {
if (obj->state >= OBJ_LOADED) {
pr_warn("object '%s': load can't be attempted twice\n", obj->name);
return libbpf_err(-EINVAL);
}
@@ -8565,17 +8642,12 @@ static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const ch
return libbpf_err(-LIBBPF_ERRNO__ENDIAN);
}

err = bpf_object_prepare_token(obj);
err = err ? : bpf_object__probe_loading(obj);
err = err ? : bpf_object__load_vmlinux_btf(obj, false);
err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
err = err ? : bpf_object__sanitize_maps(obj);
err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
err = err ? : bpf_object_adjust_struct_ops_autoload(obj);
err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path);
err = err ? : bpf_object__sanitize_and_load_btf(obj);
err = err ? : bpf_object__create_maps(obj);
err = err ? : bpf_object__load_progs(obj, extra_log_level);
if (obj->state < OBJ_PREPARED) {
err = bpf_object_prepare(obj, target_btf_path);
if (err)
return libbpf_err(err);
}
err = bpf_object__load_progs(obj, extra_log_level);
err = err ? : bpf_object_init_prog_arrays(obj);
err = err ? : bpf_object_prepare_struct_ops(obj);

@@ -8587,36 +8659,22 @@ static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const ch
err = bpf_gen__finish(obj->gen_loader, obj->nr_programs, obj->nr_maps);
}

/* clean up fd_array */
zfree(&obj->fd_array);

/* clean up module BTFs */
for (i = 0; i < obj->btf_module_cnt; i++) {
close(obj->btf_modules[i].fd);
btf__free(obj->btf_modules[i].btf);
free(obj->btf_modules[i].name);
}
free(obj->btf_modules);

/* clean up vmlinux BTF */
btf__free(obj->btf_vmlinux);
obj->btf_vmlinux = NULL;

obj->loaded = true; /* doesn't matter if successfully or not */

if (err)
goto out;

return 0;
out:
/* unpin any maps that were auto-pinned during load */
for (i = 0; i < obj->nr_maps; i++)
if (obj->maps[i].pinned && !obj->maps[i].reused)
bpf_map__unpin(&obj->maps[i], NULL);

bpf_object_unload(obj);
pr_warn("failed to load object '%s'\n", obj->path);
return libbpf_err(err);
bpf_object_post_load_cleanup(obj);
obj->state = OBJ_LOADED; /* doesn't matter if successfully or not */

if (err) {
bpf_object_unpin(obj);
bpf_object_unload(obj);
pr_warn("failed to load object '%s'\n", obj->path);
return libbpf_err(err);
}

return 0;
}

int bpf_object__prepare(struct bpf_object *obj)
{
return libbpf_err(bpf_object_prepare(obj, NULL));
}

int bpf_object__load(struct bpf_object *obj)
@@ -8866,7 +8924,7 @@ int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
if (!obj)
return libbpf_err(-ENOENT);

if (!obj->loaded) {
if (obj->state < OBJ_PREPARED) {
pr_warn("object not yet loaded; load it first\n");
return libbpf_err(-ENOENT);
}
@@ -8945,7 +9003,7 @@ int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
if (!obj)
return libbpf_err(-ENOENT);

if (!obj->loaded) {
if (obj->state < OBJ_LOADED) {
pr_warn("object not yet loaded; load it first\n");
return libbpf_err(-ENOENT);
}
@@ -9064,6 +9122,13 @@ void bpf_object__close(struct bpf_object *obj)
if (IS_ERR_OR_NULL(obj))
return;

/*
* if user called bpf_object__prepare() without ever getting to
* bpf_object__load(), we need to clean up stuff that is normally
* cleaned up at the end of loading step
*/
bpf_object_post_load_cleanup(obj);

usdt_manager_free(obj->usdt_man);
obj->usdt_man = NULL;

@@ -9132,7 +9197,7 @@ int bpf_object__btf_fd(const struct bpf_object *obj)

int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version)
{
if (obj->loaded)
if (obj->state >= OBJ_LOADED)
return libbpf_err(-EINVAL);

obj->kern_version = kern_version;
@@ -9229,7 +9294,7 @@ bool bpf_program__autoload(const struct bpf_program *prog)

int bpf_program__set_autoload(struct bpf_program *prog, bool autoload)
{
if (prog->obj->loaded)
if (prog->obj->state >= OBJ_LOADED)
return libbpf_err(-EINVAL);

prog->autoload = autoload;
@@ -9261,7 +9326,7 @@ int bpf_program__set_insns(struct bpf_program *prog,
{
struct bpf_insn *insns;

if (prog->obj->loaded)
if (prog->obj->state >= OBJ_LOADED)
return libbpf_err(-EBUSY);

insns = libbpf_reallocarray(prog->insns, new_insn_cnt, sizeof(*insns));
@@ -9304,7 +9369,7 @@ static int last_custom_sec_def_handler_id;

int bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
{
if (prog->obj->loaded)
if (prog->obj->state >= OBJ_LOADED)
return libbpf_err(-EBUSY);

/* if type is not changed, do nothing */
@@ -9335,7 +9400,7 @@ enum bpf_attach_type bpf_program__expected_attach_type(const struct bpf_program
int bpf_program__set_expected_attach_type(struct bpf_program *prog,
enum bpf_attach_type type)
{
if (prog->obj->loaded)
if (prog->obj->state >= OBJ_LOADED)
return libbpf_err(-EBUSY);

prog->expected_attach_type = type;
@@ -9349,7 +9414,7 @@ __u32 bpf_program__flags(const struct bpf_program *prog)

int bpf_program__set_flags(struct bpf_program *prog, __u32 flags)
{
if (prog->obj->loaded)
if (prog->obj->state >= OBJ_LOADED)
return libbpf_err(-EBUSY);

prog->prog_flags = flags;
@@ -9363,7 +9428,7 @@ __u32 bpf_program__log_level(const struct bpf_program *prog)

int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_level)
{
if (prog->obj->loaded)
if (prog->obj->state >= OBJ_LOADED)
return libbpf_err(-EBUSY);

prog->log_level = log_level;
@@ -9382,7 +9447,7 @@ int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log
return libbpf_err(-EINVAL);
if (prog->log_size > UINT_MAX)
return libbpf_err(-EINVAL);
if (prog->obj->loaded)
if (prog->obj->state >= OBJ_LOADED)
return libbpf_err(-EBUSY);

prog->log_buf = log_buf;
@@ -10299,7 +10364,7 @@ static int map_btf_datasec_resize(struct bpf_map *map, __u32 size)

int bpf_map__set_value_size(struct bpf_map *map, __u32 size)
{
if (map->obj->loaded || map->reused)
if (map_is_created(map))
return libbpf_err(-EBUSY);

if (map->mmaped) {
@@ -10345,7 +10410,7 @@ int bpf_map__set_initial_value(struct bpf_map *map,
{
size_t actual_sz;

if (map->obj->loaded || map->reused)
if (map_is_created(map))
return libbpf_err(-EBUSY);

if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG)
@@ -13666,7 +13731,7 @@ int bpf_program__set_attach_target(struct bpf_program *prog,
if (!prog || attach_prog_fd < 0)
return libbpf_err(-EINVAL);

if (prog->obj->loaded)
if (prog->obj->state >= OBJ_LOADED)
return libbpf_err(-EINVAL);

if (attach_prog_fd && !attach_func_name) {
13 changes: 13 additions & 0 deletions tools/lib/bpf/libbpf.h
@@ -241,6 +241,19 @@ LIBBPF_API struct bpf_object *
bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
const struct bpf_object_open_opts *opts);

/**
* @brief **bpf_object__prepare()** prepares BPF object for loading:
* performs ELF processing, relocations, prepares final state of BPF program
* instructions (accessible with bpf_program__insns()), creates and
* (potentially) pins maps. Leaves BPF object in the state ready for program
* loading.
* @param obj Pointer to a valid BPF object instance returned by
* **bpf_object__open*()** API
* @return 0, on success; negative error code, otherwise, error code is
* stored in errno
*/
int bpf_object__prepare(struct bpf_object *obj);

/**
* @brief **bpf_object__load()** loads BPF object into kernel.
* @param obj Pointer to a valid BPF object instance returned by
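
For context, a minimal sketch of the two-step workflow the doc comment above describes; the object file name and the error handling are illustrative only and not part of this patch:

#include <bpf/libbpf.h>

int main(void)
{
	struct bpf_object *obj;
	int err;

	/* "prog.bpf.o" is a hypothetical compiled BPF object file */
	obj = bpf_object__open_file("prog.bpf.o", NULL);
	if (!obj)
		return 1;

	/* step 1: ELF processing, relocations, map creation;
	 * bpf_program__insns() reflects final instructions from here on
	 */
	err = bpf_object__prepare(obj);
	if (err)
		goto out;

	/* step 2: verify and load programs into the kernel */
	err = bpf_object__load(obj);
out:
	bpf_object__close(obj);
	return err ? 1 : 0;
}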
1 change: 1 addition & 0 deletions tools/lib/bpf/libbpf.map
@@ -436,6 +436,7 @@ LIBBPF_1.6.0 {
bpf_linker__add_buf;
bpf_linker__add_fd;
bpf_linker__new_fd;
bpf_object__prepare;
btf__add_decl_attr;
btf__add_type_attr;
} LIBBPF_1.5.0;
99 changes: 99 additions & 0 deletions tools/testing/selftests/bpf/prog_tests/prepare.c
@@ -0,0 +1,99 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2025 Meta */

#include <test_progs.h>
#include <network_helpers.h>
#include "prepare.skel.h"

static bool check_prepared(struct bpf_object *obj)
{
bool is_prepared = true;
const struct bpf_map *map;

bpf_object__for_each_map(map, obj) {
if (bpf_map__fd(map) < 0)
is_prepared = false;
}

return is_prepared;
}

static void test_prepare_no_load(void)
{
struct prepare *skel;
int err;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
);

skel = prepare__open();
if (!ASSERT_OK_PTR(skel, "prepare__open"))
return;

if (!ASSERT_FALSE(check_prepared(skel->obj), "not check_prepared"))
goto cleanup;

err = bpf_object__prepare(skel->obj);

if (!ASSERT_TRUE(check_prepared(skel->obj), "check_prepared"))
goto cleanup;

if (!ASSERT_OK(err, "bpf_object__prepare"))
goto cleanup;

cleanup:
prepare__destroy(skel);
}

static void test_prepare_load(void)
{
struct prepare *skel;
int err, prog_fd;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
);

skel = prepare__open();
if (!ASSERT_OK_PTR(skel, "prepare__open"))
return;

if (!ASSERT_FALSE(check_prepared(skel->obj), "not check_prepared"))
goto cleanup;

err = bpf_object__prepare(skel->obj);
if (!ASSERT_OK(err, "bpf_object__prepare"))
goto cleanup;

err = prepare__load(skel);
if (!ASSERT_OK(err, "prepare__load"))
goto cleanup;

if (!ASSERT_TRUE(check_prepared(skel->obj), "check_prepared"))
goto cleanup;

prog_fd = bpf_program__fd(skel->progs.program);
if (!ASSERT_GE(prog_fd, 0, "prog_fd"))
goto cleanup;

err = bpf_prog_test_run_opts(prog_fd, &topts);
if (!ASSERT_OK(err, "test_run_opts err"))
goto cleanup;

if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
goto cleanup;

ASSERT_EQ(skel->bss->err, 0, "err");

cleanup:
prepare__destroy(skel);
}

void test_prepare(void)
{
if (test__start_subtest("prepare_load"))
test_prepare_load();
if (test__start_subtest("prepare_no_load"))
test_prepare_no_load();
}
28 changes: 28 additions & 0 deletions tools/testing/selftests/bpf/progs/prepare.c
@@ -0,0 +1,28 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2025 Meta */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
//#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

int err;

struct {
__uint(type, BPF_MAP_TYPE_RINGBUF);
__uint(max_entries, 4096);
} ringbuf SEC(".maps");

struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u32);
} array_map SEC(".maps");

SEC("cgroup_skb/egress")
int program(struct __sk_buff *skb)
{
err = 0;
return 0;
}