Make tokens more token-y (less dictionary-y) (#2371)

Dominik Maier 2024-07-09 17:37:37 +02:00 committed by GitHub
parent 40f9cc946c
commit 721fd3b14e
5 changed files with 22 additions and 20 deletions


@@ -4,7 +4,7 @@ This folder contains an example fuzzer tailored for fuzzbench.
 It uses the best possible setting, with the exception of a SimpleRestartingEventManager instead of an LlmpEventManager - since fuzzbench is single threaded.
 Real fuzz campaigns should consider using multithreaded LlmpEventManager, see the other examples.
-This fuzzer autodetect if the dictionary and the initial inputs are text or binary data, and enables Grimoire in case of text.
+This fuzzer autodetect if the passed-in tokens and the initial inputs are text or binary data, and enables Grimoire in case of text.
 ## Build
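The text-or-binary autodetection mentioned in this README can be as simple as checking that every byte of the tokens and seed inputs is printable ASCII or common whitespace. Below is a minimal sketch of such a heuristic; the function names are made up for illustration and are not the fuzzbench fuzzer's actual helpers.

    /// Rough heuristic: treat data as text when every byte is printable ASCII or common whitespace.
    fn looks_like_text(data: &[u8]) -> bool {
        data.iter()
            .all(|&b| b.is_ascii_graphic() || b == b' ' || b == b'\n' || b == b'\r' || b == b'\t')
    }

    /// Enable Grimoire-style string mutations only when all tokens and seed inputs look textual.
    fn should_enable_grimoire(tokens: &[Vec<u8>], seeds: &[Vec<u8>]) -> bool {
        tokens.iter().chain(seeds.iter()).all(|d| looks_like_text(d))
    }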


@@ -1,5 +1,5 @@
 #
-# AFL dictionary for JPEG images
+# AFL tokens file for JPEG images
 # ------------------------------
 #
 # Created by Michal Zalewski
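For context, AFL-style tokens files are plain text with one name="value" entry per line, where non-printable bytes are written as \xNN escapes. A few representative JPEG entries (illustrative values, not necessarily the exact contents of this file):

    header_jfif="JFIF\x00"
    section_sof0="\xff\xc0"
    section_eoi="\xff\xd9"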


@@ -891,26 +891,28 @@ impl<'a, SP> ForkserverExecutorBuilder<'a, SP> {
         if status & FS_NEW_OPT_AUTODICT != 0 {
             // Here unlike shmem input fuzzing, we are forced to read things
             // hence no self.autotokens.is_some() to check if we proceed
-            let (read_len, dict_size) = forkserver.read_st()?;
+            let (read_len, autotokens_size) = forkserver.read_st()?;
             if read_len != 4 {
                 return Err(Error::unknown(
-                    "Failed to read dictionary size from forkserver".to_string(),
+                    "Failed to read autotokens size from forkserver".to_string(),
                 ));
             }
-            if !(2..=0xffffff).contains(&dict_size) {
+            let tokens_size_max = 0xffffff;
+            if !(2..=tokens_size_max).contains(&autotokens_size) {
                 return Err(Error::illegal_state(
-                    "Dictionary has an illegal size".to_string(),
+                    format!("Autotokens size is incorrect, expected 2 to {tokens_size_max} (inclusive), but got {autotokens_size}. Make sure your afl-cc verison is up to date."),
                 ));
             }
-            log::info!("Autodict size {dict_size:x}");
+            log::info!("Autotokens size {autotokens_size:x}");
-            let (rlen, buf) = forkserver.read_st_size(dict_size as usize)?;
+            let (rlen, buf) = forkserver.read_st_size(autotokens_size as usize)?;
-            if rlen != dict_size as usize {
+            if rlen != autotokens_size as usize {
-                return Err(Error::unknown("Failed to load autodictionary".to_string()));
+                return Err(Error::unknown("Failed to load autotokens".to_string()));
             }
             if let Some(t) = &mut self.autotokens {
-                t.parse_autodict(&buf, dict_size as usize);
+                t.parse_autodict(&buf, autotokens_size as usize);
             }
         }
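The buffer handed to parse_autodict above follows, as far as the AFL++ autodict convention goes, a simple encoding: a concatenation of tokens, each one length byte followed by that many raw token bytes. A minimal decoding sketch under that assumption (illustration only, not LibAFL's actual Tokens::parse_autodict):

    /// Decode an autotokens blob assumed to be encoded as repeated
    /// [1-byte length][length raw bytes] entries until the buffer ends.
    fn decode_autotokens(buf: &[u8]) -> Vec<Vec<u8>> {
        let mut tokens = Vec::new();
        let mut pos = 0;
        while pos < buf.len() {
            let len = buf[pos] as usize;
            pos += 1;
            if len == 0 || pos + len > buf.len() {
                break; // empty or truncated entry, stop parsing
            }
            tokens.push(buf[pos..pos + len].to_vec());
            pos += len;
        }
        tokens
    }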


@@ -384,7 +384,7 @@ impl<'a> LibfuzzerOptionsBuilder<'a> {
             unicode: self.unicode.unwrap_or(true),
             forks: self.forks,
             dict: self.dict.map(|path| {
-                Tokens::from_file(path).expect("Couldn't load tokens from specified dictionary")
+                Tokens::from_file(path).expect("Couldn't load tokens from specified tokens file")
             }),
             dirs: self.dirs.into_iter().map(PathBuf::from).collect(),
             ignore_crashes: self.ignore_crashes.unwrap_or_default(),
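On the libfuzzer-options side, a user-supplied -dict= path is loaded with Tokens::from_file as shown above. For a hand-rolled fuzzer the same idea looks roughly like the sketch below; the module path, Tokens::new, and the add_metadata mention are assumptions about the LibAFL API around this commit and may differ between versions.

    use libafl::mutators::Tokens;

    /// Load user-supplied tokens, if any (paths and signatures assumed, see note above).
    fn load_tokens(dict_path: Option<&str>) -> Result<Tokens, libafl::Error> {
        match dict_path {
            // Same call the options parser above uses for `-dict=...`
            Some(path) => Tokens::from_file(path),
            None => Ok(Tokens::new()),
        }
        // The resulting Tokens is typically registered as state metadata
        // (e.g. state.add_metadata(tokens)) so token mutators can pick it up.
    }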


@@ -239,7 +239,7 @@ void __afl_start_forkserver(void) {
   void (*old_sigchld_handler)(int) = signal(SIGCHLD, SIG_DFL);
-  int autodict_on = __token_start != NULL && __token_stop != NULL;
+  int autotokens_on = __token_start != NULL && __token_stop != NULL;
   /* Phone home and tell the parent that we're OK. If parent isn't there,
      assume we're not running in forkserver mode and just execute program. */
@@ -256,7 +256,7 @@ void __afl_start_forkserver(void) {
   status = FS_NEW_OPT_MAPSIZE;
   if (__afl_sharedmem_fuzzing) { status |= FS_NEW_OPT_SHDMEM_FUZZ; }
-  if (autodict_on) { status |= FS_NEW_OPT_AUTODICT; }
+  if (autotokens_on) { status |= FS_NEW_OPT_AUTODICT; }
   if (write(FORKSRV_FD + 1, msg, 4) != 4) { _exit(1); }
@@ -266,14 +266,14 @@ void __afl_start_forkserver(void) {
   status = __afl_map_size;
   if (write(FORKSRV_FD + 1, msg, 4) != 4) { _exit(1); }
-  // FS_NEW_OPT_AUTODICT - send autodictionary
+  // FS_NEW_OPT_AUTODICT - send autotokens
-  if (autodict_on) {
+  if (autotokens_on) {
-    // pass the dictionary through the forkserver FD
+    // pass the autotokens through the forkserver FD
     uint32_t len = (__token_stop - __token_start), offset = 0;
     if (write(FORKSRV_FD + 1, &len, 4) != 4) {
-      write(2, "Error: could not send dictionary len\n",
+      write(2, "Error: could not send autotokens len\n",
-            strlen("Error: could not send dictionary len\n"));
+            strlen("Error: could not send autotokens len\n"));
       _exit(1);
     }
@@ -282,7 +282,7 @@ void __afl_start_forkserver(void) {
       ret = write(FORKSRV_FD + 1, __token_start + offset, len);
       if (ret < 1) {
-        write_error("could not send dictionary");
+        write_error("could not send autotokens");
        _exit(1);
      }
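The target-side loop above advances offset on short writes; the Rust executor's read_st_size in the hunk further up has to do the mirror-image loop for short reads. A simplified sketch of such an exact-read helper over a raw pipe fd, using the libc crate (illustration only, not LibAFL's actual implementation):

    /// Read exactly `len` bytes from a pipe fd, retrying on short reads;
    /// mirrors the offset/len write loop on the target side.
    fn read_exact_fd(fd: std::os::fd::RawFd, len: usize) -> std::io::Result<Vec<u8>> {
        let mut buf = vec![0u8; len];
        let mut offset = 0;
        while offset < len {
            let ret = unsafe {
                libc::read(
                    fd,
                    buf[offset..].as_mut_ptr() as *mut libc::c_void,
                    len - offset,
                )
            };
            if ret == 0 {
                // The other side closed the pipe before sending everything.
                return Err(std::io::Error::new(
                    std::io::ErrorKind::UnexpectedEof,
                    "forkserver pipe closed early",
                ));
            }
            if ret < 0 {
                return Err(std::io::Error::last_os_error());
            }
            offset += ret as usize;
        }
        Ok(buf)
    }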