// bzip2/lib.rs

//! Bzip compression for Rust
//!
//! This library contains bindings to [`libbz2`] to support bzip compression and
//! decompression for Rust. The streams offered in this library are primarily
//! found in the [`mod@read`] and [`mod@write`] modules. Both compressors and
//! decompressors are available in each module depending on what operation you
//! need.
//!
//! A more low-level interface, much closer to the interface of [`libbz2`], is
//! available via the [`Compress`] and [`Decompress`] structs.
//!
//! [`libbz2`]: https://sourceware.org/bzip2/manual/manual.html
//!
//! # Example
//!
//! ```
//! use std::io::{BufRead, Read, Write};
//! use bzip2::Compression;
//! use bzip2::read::{BzEncoder, BzDecoder};
//!
//! // Round trip some bytes from a byte source, into a compressor, into a
//! // decompressor, and finally into a vector.
//! let data = "Hello, World!".as_bytes();
//! let compressor = BzEncoder::new(data, Compression::best());
//! let mut decompressor = BzDecoder::new(compressor);
//!
//! let mut contents = String::new();
//! decompressor.read_to_string(&mut contents).unwrap();
//! assert_eq!(contents, "Hello, World!");
//! ```
//!
//! # Multistreams (e.g. Wikipedia or pbzip2)
//!
//! Some tools such as pbzip2 or data from sources such as Wikipedia
//! are encoded as so called bzip2 "multistreams," meaning they
//! contain back to back chunks of bzip'd data. `BzDecoder` does not
//! attempt to convert anything after the first bzip chunk in the
//! source stream. Thus, if you wish to decode all bzip chunks from
//! the input until end of file, use `MultiBzDecoder`.
//!
//! *Protip*: If you use `BzDecoder` to decode data and the output is
//! incomplete and exactly 900K bytes, you probably need a
//! `MultiBzDecoder`.
//!
//! All methods are internally capable of working with streams that may return
//! [`ErrorKind::WouldBlock`](std::io::ErrorKind::WouldBlock) when they're not
//! ready to perform the particular operation.
//!
//! Note that care needs to be taken when using these objects, however. The
//! Tokio runtime, in particular, requires that data is fully flushed before
//! dropping streams. For compatibility with blocking streams all streams are
//! flushed/written when they are dropped, and this is not always a suitable
//! time to perform I/O. If I/O streams are flushed before drop, however, then
//! these operations will be a noop.

56#![deny(missing_docs)]
57#![doc(html_root_url = "https://docs.rs/bzip2/")]
58
59#[cfg(not(feature = "libbz2-rs-sys"))]
60extern crate bzip2_sys as ffi;
61#[cfg(feature = "libbz2-rs-sys")]
62extern crate libbz2_rs_sys as ffi;
63#[cfg(test)]
64extern crate partial_io;
65#[cfg(test)]
66extern crate quickcheck;
67#[cfg(test)]
68extern crate rand;
69
70pub use mem::{Action, Compress, Decompress, Error, Status};
71
72mod mem;
73
74pub mod bufread;
75pub mod read;
76pub mod write;
77
/// When compressing data, the compression level can be specified by a value in
/// this struct.
///
/// The level maps directly to libbz2's `blockSize100k` parameter: higher
/// levels use larger blocks, producing smaller output at the cost of memory.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct Compression(u32);

impl Compression {
    /// Create a new compression spec with a specific numeric level in the range `1..=9`.
    ///
    /// # Panics
    ///
    /// A level outside of the `1..=9` range will throw a panic. Use [`Self::try_new`] to
    /// gracefully handle invalid levels (e.g. from user input).
    #[track_caller]
    pub const fn new(level: u32) -> Compression {
        match Self::try_new(level) {
            Some(v) => v,
            None => panic!("expected a compression level in the range 1..=9"),
        }
    }

    /// Create a new compression spec with a specific numeric level in the range `1..=9`.
    ///
    /// Returns `None` if `level` is outside that range.
    pub const fn try_new(level: u32) -> Option<Compression> {
        match level {
            1..=9 => Some(Compression(level)),
            _ => None,
        }
    }

    /// Do not compress.
    // Kept only for backward compatibility: level 0 is rejected by libbz2, so
    // constructing it via the public constructors is impossible.
    #[deprecated(since = "0.5.1", note = "libbz2 does not support compression level 0")]
    pub const fn none() -> Compression {
        Compression(0)
    }

    /// Optimize for the best speed of encoding.
    pub const fn fast() -> Compression {
        Compression(1)
    }

    /// Optimize for smallest output size.
    pub const fn best() -> Compression {
        Compression(9)
    }

    /// Return the compression level as an integer.
    pub const fn level(&self) -> u32 {
        self.0
    }
}

impl Default for Compression {
    /// Choose the default compression, a balance between speed and size.
    fn default() -> Compression {
        Compression(6)
    }
}
134
#[cfg(test)]
mod test {
    use super::*;

    // `new` must reject levels below the valid range with a panic.
    #[test]
    #[should_panic]
    fn new_level_0() {
        Compression::new(0);
    }

    // `new` must reject levels above the valid range with a panic.
    #[test]
    #[should_panic]
    fn new_level_10() {
        Compression::new(10);
    }

    // `try_new` returns `None` out of range and agrees with the named
    // constructors inside the `1..=9` range.
    #[test]
    fn try_new() {
        assert!(Compression::try_new(0).is_none());
        assert!(Compression::try_new(10).is_none());

        assert_eq!(Compression::try_new(1), Some(Compression::fast()));
        assert_eq!(Compression::try_new(6), Some(Compression::default()));
        assert_eq!(Compression::try_new(9), Some(Compression::best()));
    }
}
160}