@@ -17,9 +17,7 @@ pub mod path_splitting {
         FileDirOverlap(String),
     }

-    fn split_by_separator<'a>(
-        entry_path: &'a str,
-    ) -> Result<impl Iterator<Item = &'a str>, PathSplitError> {
+    fn split_by_separator(entry_path: &str) -> Result<impl Iterator<Item = &str>, PathSplitError> {
         if entry_path.contains('\\') {
             if entry_path.contains('/') {
                 return Err(PathSplitError::PathFormat(format!(
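A standalone sketch of the elision at work (hypothetical `words` function, not from this codebase): with a single `&str` input, Rust's lifetime elision ties the borrowed `Item` of the returned `impl Iterator` to that input, so the explicit `<'a>` the old signature spelled out was redundant.

    // Hypothetical example: the elided form below desugars to
    // fn words<'a>(line: &'a str) -> impl Iterator<Item = &'a str>.
    fn words(line: &str) -> impl Iterator<Item = &str> {
        line.split_whitespace()
    }

    fn main() {
        assert_eq!(words("a b c").count(), 3);
    }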
@@ -152,20 +150,20 @@ pub mod path_splitting {
                 .entry(component)
                 .or_insert_with(|| Box::new(FSEntry::Dir(DirEntry::default())));
             cur_dir = match next_subdir.as_mut() {
-                &mut FSEntry::File(_) => {
+                FSEntry::File(_) => {
                     return Err(PathSplitError::FileDirOverlap(format!(
                         "a file was already registered at the same path as the dir entry {:?}",
                         entry_path
                     )));
                 }
-                &mut FSEntry::Dir(ref mut subdir) => subdir,
+                FSEntry::Dir(ref mut subdir) => subdir,
             }
         }
         match file_component {
             Some(filename) => {
                 /* We can't handle duplicate file paths, as that might mess up our
                  * parallelization strategy. */
-                if let Some(_) = cur_dir.children.get(filename) {
+                if cur_dir.children.contains_key(filename) {
                     return Err(PathSplitError::FileDirOverlap(format!(
                         "another file or directory was already registered at the same path as the file entry {:?}",
                         entry_path
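The `&mut`-pattern removal leans on match ergonomics (default binding modes, stable since Rust 1.26): matching on a `&mut` reference with reference-free patterns makes the bindings borrow mutably on their own. A minimal sketch with a hypothetical `Node` enum:

    enum Node {
        Leaf(u32),
        Branch(Vec<Node>),
    }

    // Matching a &mut Node no longer needs `&mut ...` patterns; `children`
    // is automatically bound as &mut Vec<Node>.
    fn branch_children(node: &mut Node) -> Option<&mut Vec<Node>> {
        match node {
            Node::Leaf(_) => None,
            Node::Branch(children) => Some(children),
        }
    }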
@@ -179,7 +177,7 @@ pub mod path_splitting {
                 /* We can't handle duplicate directory entries for the exact same normalized
                  * path, as it's not clear how to merge the possibility of two separate file
                  * permissions. */
-                if let Some(_) = cur_dir.properties.replace(data) {
+                if cur_dir.properties.replace(data).is_some() {
                     return Err(PathSplitError::FileDirOverlap(format!(
                         "another directory was already registered at the path {:?}",
                         entry_path
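Both `if let Some(_) = ...` rewrites (the `contains_key` one above and the `is_some` one here) silence clippy's `redundant_pattern_matching` and kin: when the matched value is discarded, a boolean query says the same thing without a dead binding. A sketch with hypothetical names:

    use std::collections::HashMap;

    fn register(map: &mut HashMap<String, u32>, key: &str, value: u32) -> Result<(), String> {
        // Instead of `if let Some(_) = map.get(key)`:
        if map.contains_key(key) {
            return Err(format!("duplicate entry {:?}", key));
        }
        map.insert(key.to_owned(), value);
        Ok(())
    }

    fn set_once(slot: &mut Option<u32>, value: u32) -> Result<(), String> {
        // Instead of `if let Some(_) = slot.replace(value)`:
        if slot.replace(value).is_some() {
            return Err("slot was already occupied".to_owned());
        }
        Ok(())
    }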
@@ -456,6 +454,7 @@ pub mod handle_creation {
          * as a proxy. This should be considered if requested by users. */
         fs::create_dir_all(top_level_extraction_dir)?;

+        #[allow(clippy::mutable_key_type)]
         let mut file_handle_mapping: HashMap<ZipDataHandle<'a>, fs::File> = HashMap::new();
         let mut entry_queue: VecDeque<(PathBuf, Box<FSEntry<'a, &'a ZipFileData>>)> =
             lex_entry_trie
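`clippy::mutable_key_type` fires when a key type has interior mutability, since mutating a key after insertion would corrupt the hash table. Presumably `ZipDataHandle` only hashes and compares fields that never change, making this a false positive worth allowing. A sketch of the shape of such a key, using a hypothetical `Handle` type:

    use std::cell::Cell;
    use std::collections::HashMap;
    use std::hash::{Hash, Hasher};

    struct Handle {
        id: u64,
        hits: Cell<u32>, // interior mutability triggers the lint...
    }

    impl Hash for Handle {
        fn hash<H: Hasher>(&self, state: &mut H) {
            self.id.hash(state); // ...but Hash/Eq only ever read `id`
        }
    }
    impl PartialEq for Handle {
        fn eq(&self, other: &Self) -> bool {
            self.id == other.id
        }
    }
    impl Eq for Handle {}

    fn main() {
        #[allow(clippy::mutable_key_type)]
        let mut map: HashMap<Handle, &str> = HashMap::new();
        map.insert(Handle { id: 1, hits: Cell::new(0) }, "first");
    }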
@@ -757,6 +756,7 @@ pub mod split_extraction {
     ) -> impl FnOnce() + Send + 'scope {
         move || match f() {
             Ok(()) => (),
+            #[allow(clippy::single_match)]
             Err(e) => match err_sender.send(e) {
                 Ok(()) => (),
                 /* We use an async sender, so this should only error if the receiver has hung
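`clippy::single_match` suggests collapsing a `match` whose only interesting arm is one of two into an `if let`; the allows here keep the explicit two-arm form so a comment on the ignored arm can document why the error case is deliberately dropped. A sketch of that trade-off with a hypothetical sender:

    use std::sync::mpsc::Sender;

    fn notify(sender: &Sender<usize>, index: usize) {
        // Clippy would suggest an `if let`; the match keeps a slot for the
        // comment explaining why disconnection is fine to ignore.
        #[allow(clippy::single_match)]
        match sender.send(index) {
            Ok(()) => (),
            /* Disconnected; expected once the receiver shuts down. */
            Err(_) => (),
        }
    }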
@@ -804,7 +804,7 @@ pub mod split_extraction {
          * sections in parallel across a thread pool. */
         let input_file = FileInput::new(input_file)?;

-        thread::scope(move |ref scope| {
+        thread::scope(move |scope| {
             /* (4) Create n parallel consumer pipelines. Threads are spawned into the scope, so
              * panics get propagated automatically, and all threads are joined at the end of the
              * scope. wrap_spawn_err() is used to enable thread closures to return a Result and
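`std::thread::scope` already hands the closure a `&Scope`, so the `ref` binding only added another layer of reference; a plain binding does the same job. A minimal scoped-thread sketch:

    use std::thread;

    fn main() {
        let data = vec![1, 2, 3];
        // `scope` is already a &Scope here; `|ref scope|` would bind &&Scope.
        let sum = thread::scope(|scope| {
            let handle = scope.spawn(|| data.iter().sum::<i32>());
            handle.join().unwrap()
        });
        assert_eq!(sum, 6);
    }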
@@ -833,6 +833,7 @@ pub mod split_extraction {
             /* Send this consumer pipeline's index to the zip-input-reader thread when it's
              * ready to receive new input. */
             let queue_sender = queue_sender.clone();
+            #[allow(clippy::single_match)]
             let notify_readiness = move || match queue_sender.send(consumer_index) {
                 Ok(()) => (),
                 /* Disconnected; this is expected to occur at the end of extraction. */
@@ -856,7 +857,7 @@ pub mod split_extraction {
             #[cfg(not(target_os = "linux"))]
             let mut s = PipeReadBufferSplicer::new(&mut splice_buf);

-            for (ref entry, mut output_file) in uncompressed_receiver.iter() {
+            for (entry, mut output_file) in uncompressed_receiver.iter() {
                 s.splice_to_file_all(
                     &mut uncompressed_read_end,
                     (&mut output_file, 0),
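The same cleanup recurs in this and the following hunks: channel items arrive owned, so `for (ref entry, ...)` only re-borrowed values the loop body owns anyway; a plain binding lets the body borrow or consume each field as needed. A sketch with a hypothetical channel:

    use std::sync::mpsc;

    fn main() {
        let (tx, rx) = mpsc::channel::<(String, u64)>();
        tx.send(("entry.txt".to_owned(), 42)).unwrap();
        drop(tx); // close the channel so the iterator terminates

        // Items are received by value; `for (ref name, len)` would only
        // borrow from the owned tuple. Bind plainly, borrow at use sites.
        for (name, len) in rx.iter() {
            println!("{}: {} bytes", name, len);
        }
    }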
@@ -891,7 +892,7 @@ pub mod split_extraction {
             let mut buffer_allocation: Box<[u8]> =
                 vec![0u8; decompression_copy_buffer_length].into_boxed_slice();

-            for (ref entry, output_file) in compressed_receiver.iter() {
+            for (entry, output_file) in compressed_receiver.iter() {
                 /* Construct the decompressing reader. */
                 let limited_reader = ((&mut compressed_read_end)
                     as &mut dyn Read)
@@ -960,7 +961,7 @@ pub mod split_extraction {
              * until we notify them. */
             notify_readiness();

-            for (ref entry, data_start, mut output_file) in read_recv.iter() {
+            for (entry, data_start, mut output_file) in read_recv.iter() {
                 /* If uncompressed, we can use copy_file_range() directly, and
                  * avoid splicing through our decompression pipeline. */
                 if entry.compression_method == CompressionMethod::Stored {
@@ -1059,6 +1060,7 @@ pub mod split_extraction {
             .spawn_scoped(
                 scope,
                 wrap_spawn_err(err_sender, move || {
+                    #[allow(clippy::mutable_key_type)]
                     let mut file_handle_mapping = file_handle_mapping;
                     /* All consumer pipelines share the same channel to notify us of their
                      * identity when ready. */
@@ -1071,7 +1073,7 @@ pub mod split_extraction {

                     /* Entries are ordered by their offset, so we will be going monotonically
                      * forward in the underlying file. */
-                    for ref entry in shared.files.values() {
+                    for entry in shared.files.values() {
                         /* We have already created all necessary directories, and we set any
                          * dir perms after extracting file contents. */
                         if entry.is_dir() || entry.is_dir_by_mode() {
@@ -1112,7 +1114,7 @@ pub mod split_extraction {
         /* If no I/O errors occurred, this won't trigger. We will only be able to propagate
          * a single I/O error, but this also avoids propagating any errors triggered after the
          * initial one. */
-        for err in err_receiver.iter() {
+        if let Some(err) = err_receiver.iter().next() {
             return Err(err);
         }

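The final hunk fixes a loop whose body always returns on the first iteration, likely flagged by clippy's `never_loop`; `.iter().next()` states the intent directly: take the first reported error, if any. A sketch with a hypothetical error channel:

    use std::sync::mpsc;

    fn first_error(err_receiver: mpsc::Receiver<String>) -> Result<(), String> {
        // Blocks until either a worker reports an error or all senders are
        // dropped; only the first error is propagated.
        if let Some(err) = err_receiver.iter().next() {
            return Err(err);
        }
        Ok(())
    }

    fn main() {
        let (tx, rx) = mpsc::channel();
        tx.send("disk full".to_owned()).unwrap();
        drop(tx);
        assert_eq!(first_error(rx), Err("disk full".to_owned()));
    }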