diff --git a/fuzz/dct_fuzzer.cc b/fuzz/dct_fuzzer.cc
index 25a68ea0..a99e9e0e 100644
--- a/fuzz/dct_fuzzer.cc
+++ b/fuzz/dct_fuzzer.cc
@@ -31,6 +31,7 @@ FuzzHelper::doChecks()
     // jpeg_start_decompress is called. During normal use of qpdf very large JPEGs can occasionally
     // occur legitimately and therefore must be allowed during normal operations.
     Pl_DCT::setMemoryLimit(200'000'000);
+    Pl_DCT::setScanLimit(50);
 
     // Do not decompress corrupt data. This may cause extended runtime within jpeglib without
     // exercising additional code paths in qpdf.
diff --git a/fuzz/qpdf_fuzzer.cc b/fuzz/qpdf_fuzzer.cc
index 16e175e4..45d924d3 100644
--- a/fuzz/qpdf_fuzzer.cc
+++ b/fuzz/qpdf_fuzzer.cc
@@ -181,6 +181,7 @@ FuzzHelper::doChecks()
     // jpeg_start_decompress is called. During normal use of qpdf very large JPEGs can occasionally
     // occur legitimately and therefore must be allowed during normal operations.
     Pl_DCT::setMemoryLimit(100'000'000);
+    Pl_DCT::setScanLimit(50);
 
     Pl_PNGFilter::setMemoryLimit(1'000'000);
     Pl_TIFFPredictor::setMemoryLimit(1'000'000);
diff --git a/include/qpdf/Pl_DCT.hh b/include/qpdf/Pl_DCT.hh
index 077a1f92..0671d1a8 100644
--- a/include/qpdf/Pl_DCT.hh
+++ b/include/qpdf/Pl_DCT.hh
@@ -39,6 +39,11 @@ class QPDF_DLL_CLASS Pl_DCT: public Pipeline
     QPDF_DLL
     static void setMemoryLimit(long limit);
 
+    // Limit the number of scans used by jpeglib when decompressing progressive jpegs.
+    // NB This is a static option affecting all Pl_DCT instances.
+    QPDF_DLL
+    static void setScanLimit(int limit);
+
     // Treat corrupt data as a runtime error rather than attempting to decompress regardless. This
     // is the qpdf default behaviour. To attempt to decompress corrupt data set 'treat_as_error' to
     // false.
diff --git a/libqpdf/Pl_DCT.cc b/libqpdf/Pl_DCT.cc
index fe3da5c6..3288a298 100644
--- a/libqpdf/Pl_DCT.cc
+++ b/libqpdf/Pl_DCT.cc
@@ -22,6 +22,7 @@ namespace
     };
 
     long memory_limit{0};
+    int scan_limit{0};
     bool throw_on_corrupt_data{true};
 } // namespace
 
@@ -45,6 +46,17 @@ emit_message(j_common_ptr cinfo, int msg_level)
     }
 }
 
+static void
+progress_monitor(j_common_ptr cinfo)
+{
+    if (cinfo->is_decompressor &&
+        reinterpret_cast<jpeg_decompress_struct*>(cinfo)->input_scan_number > scan_limit) {
+        auto* jerr = reinterpret_cast<qpdf_jpeg_error_mgr*>(cinfo->err);
+        jerr->msg = "Pl_DCT::decompress: JPEG data has too many scans";
+        longjmp(jerr->jmpbuf, 1);
+    }
+}
+
 Pl_DCT::Members::Members() :
     action(a_decompress),
     buf("DCT compressed image")
@@ -74,6 +86,12 @@ Pl_DCT::setMemoryLimit(long limit)
     memory_limit = limit;
 }
 
+void
+Pl_DCT::setScanLimit(int limit)
+{
+    scan_limit = limit;
+}
+
 void
 Pl_DCT::setThrowOnCorruptData(bool treat_as_error)
 {
@@ -341,6 +359,11 @@ Pl_DCT::decompress(void* cinfo_p, Buffer* b)
         // first warning is encountered causing a timeout in oss-fuzz.
         throw std::runtime_error("Pl_DCT::decompress: JPEG data large - may be too slow");
     }
+    jpeg_progress_mgr progress_mgr;
+    if (scan_limit > 0) {
+        progress_mgr.progress_monitor = &progress_monitor;
+        cinfo->progress = &progress_mgr;
+    }
 
     JSAMPARRAY buffer =
         (*cinfo->mem->alloc_sarray)(reinterpret_cast<j_common_ptr>(cinfo), JPOOL_IMAGE, width, 1);
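
For reference, a minimal sketch (not part of the patch) of how an application embedding qpdf might apply the new limit before decoding untrusted JPEG data. The helper name decode_untrusted_jpeg and the use of Pl_Discard as the downstream sink are illustrative assumptions; only Pl_DCT::setScanLimit, Pl_DCT::setMemoryLimit, and the Pl_DCT decompression pipeline come from this patch and the existing qpdf API.

    // Illustrative sketch: cap progressive-scan count and decompressor memory
    // before pushing untrusted JPEG data through Pl_DCT.
    #include <qpdf/Pl_DCT.hh>
    #include <qpdf/Pl_Discard.hh>

    #include <cstddef>

    void
    decode_untrusted_jpeg(unsigned char* data, size_t len) // hypothetical helper
    {
        // Static options: they affect every Pl_DCT instance in the process.
        Pl_DCT::setMemoryLimit(100'000'000); // same value qpdf_fuzzer.cc uses
        Pl_DCT::setScanLimit(50);            // give up after 50 progressive scans

        Pl_Discard discard;                  // throw away the decoded samples
        Pl_DCT dct("decode jpeg", &discard); // this constructor decompresses
        dct.write(data, len);
        dct.finish(); // a scan-limit violation surfaces as std::runtime_error
    }

Like setMemoryLimit, the new limit is process-wide static state, so callers that only want it during fuzzing or while handling untrusted input should set it back to 0 afterwards.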