/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package org.apache.commons.compress.archivers.zip;

import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.util.Calendar;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.zip.Deflater;
import java.util.zip.ZipException;

import org.apache.commons.compress.archivers.ArchiveEntry;
import org.apache.commons.compress.archivers.ArchiveOutputStream;
import org.apache.commons.compress.utils.IOUtils;

import static org.apache.commons.compress.archivers.zip.ZipConstants.DATA_DESCRIPTOR_MIN_VERSION;
import static org.apache.commons.compress.archivers.zip.ZipConstants.DWORD;
import static org.apache.commons.compress.archivers.zip.ZipConstants.INITIAL_VERSION;
import static org.apache.commons.compress.archivers.zip.ZipConstants.SHORT;
import static org.apache.commons.compress.archivers.zip.ZipConstants.WORD;
import static org.apache.commons.compress.archivers.zip.ZipConstants.ZIP64_MAGIC;
import static org.apache.commons.compress.archivers.zip.ZipConstants.ZIP64_MAGIC_SHORT;
import static org.apache.commons.compress.archivers.zip.ZipConstants.ZIP64_MIN_VERSION;
import static org.apache.commons.compress.archivers.zip.ZipLong.putLong;
import static org.apache.commons.compress.archivers.zip.ZipShort.putShort;

/**
 * Reimplementation of {@link java.util.zip.ZipOutputStream
 * java.util.zip.ZipOutputStream} that does handle the extended
 * functionality of this package, especially internal/external file
 * attributes and extra fields with different layouts for local file
 * data and central directory entries.
 *
 * <p>This class will try to use {@link java.io.RandomAccessFile
 * RandomAccessFile} when you know that the output is going to go to a
 * file.</p>
 *
 * <p>If RandomAccessFile cannot be used, this implementation will use
 * a Data Descriptor to store size and CRC information for {@link
 * #DEFLATED DEFLATED} entries, which means you don't need to
 * calculate them yourself.
 * Unfortunately this is not possible for
 * the {@link #STORED STORED} method, where setting the CRC and
 * uncompressed size information is required before {@link
 * #putArchiveEntry(ArchiveEntry)} can be called.</p>
 *
 * <p>As of Apache Commons Compress 1.3 it transparently supports Zip64
 * extensions and thus individual entries and archives larger than 4
 * GB or with more than 65536 entries in most cases but explicit
 * control is provided via {@link #setUseZip64}. If the stream cannot
 * use RandomAccessFile and you try to write a ZipArchiveEntry of
 * unknown size then Zip64 extensions will be disabled by default.</p>
 *
 * @NotThreadSafe
 */
public class ZipArchiveOutputStream extends ArchiveOutputStream {

    static final int BUFFER_SIZE = 512;
    private static final int LFH_SIG_OFFSET = 0;
    private static final int LFH_VERSION_NEEDED_OFFSET = 4;
    private static final int LFH_GPB_OFFSET = 6;
    private static final int LFH_METHOD_OFFSET = 8;
    private static final int LFH_TIME_OFFSET = 10;
    private static final int LFH_CRC_OFFSET = 14;
    private static final int LFH_COMPRESSED_SIZE_OFFSET = 18;
    private static final int LFH_ORIGINAL_SIZE_OFFSET = 22;
    private static final int LFH_FILENAME_LENGTH_OFFSET = 26;
    private static final int LFH_EXTRA_LENGTH_OFFSET = 28;
    private static final int LFH_FILENAME_OFFSET = 30;
    private static final int CFH_SIG_OFFSET = 0;
    private static final int CFH_VERSION_MADE_BY_OFFSET = 4;
    private static final int CFH_VERSION_NEEDED_OFFSET = 6;
    private static final int CFH_GPB_OFFSET = 8;
    private static final int CFH_METHOD_OFFSET = 10;
    private static final int CFH_TIME_OFFSET = 12;
    private static final int CFH_CRC_OFFSET = 16;
    private static final int CFH_COMPRESSED_SIZE_OFFSET = 20;
    private static final int CFH_ORIGINAL_SIZE_OFFSET = 24;
    private static final int CFH_FILENAME_LENGTH_OFFSET = 28;
    private static final int CFH_EXTRA_LENGTH_OFFSET = 30;
    private static final int CFH_COMMENT_LENGTH_OFFSET = 32;
    private static final int CFH_DISK_NUMBER_OFFSET = 34;
    private static final int CFH_INTERNAL_ATTRIBUTES_OFFSET = 36;
    private static final int CFH_EXTERNAL_ATTRIBUTES_OFFSET = 38;
    private static final int CFH_LFH_OFFSET = 42;
    private static final int CFH_FILENAME_OFFSET = 46;

    /** indicates if this archive is finished. protected for use in Jar implementation */
    protected boolean finished = false;

    /*
     * Apparently Deflater.setInput gets slowed down a lot on Sun JVMs
     * when it gets handed a really big buffer. See
     * https://issues.apache.org/bugzilla/show_bug.cgi?id=45396
     *
     * Using a buffer size of 8 kB proved to be a good compromise
     */
    private static final int DEFLATER_BLOCK_SIZE = 8192;

    /**
     * Compression method for deflated entries.
     */
    public static final int DEFLATED = java.util.zip.ZipEntry.DEFLATED;

    /**
     * Default compression level for deflated entries.
     */
    public static final int DEFAULT_COMPRESSION = Deflater.DEFAULT_COMPRESSION;

    /**
     * Compression method for stored entries.
     */
    public static final int STORED = java.util.zip.ZipEntry.STORED;

    /**
     * default encoding for file names and comment.
     */
    static final String DEFAULT_ENCODING = ZipEncodingHelper.UTF8;

    /**
     * General purpose flag, which indicates that filenames are
     * written in UTF-8.
     * @deprecated use {@link GeneralPurposeBit#UFT8_NAMES_FLAG} instead
     */
    @Deprecated
    public static final int EFS_FLAG = GeneralPurposeBit.UFT8_NAMES_FLAG;

    private static final byte[] EMPTY = new byte[0];

    /**
     * Current entry.
     */
    private CurrentEntry entry;

    /**
     * The file comment.
     */
    private String comment = "";

    /**
     * Compression level for next entry.
     */
    private int level = DEFAULT_COMPRESSION;

    /**
     * Has the compression level changed when compared to the last
     * entry?
     */
    private boolean hasCompressionLevelChanged = false;

    /**
     * Default compression method for next entry.
     */
    private int method = java.util.zip.ZipEntry.DEFLATED;

    /**
     * List of ZipArchiveEntries written so far.
     */
    private final List<ZipArchiveEntry> entries =
        new LinkedList<ZipArchiveEntry>();

    private final StreamCompressor streamCompressor;

    /**
     * Start of central directory.
     */
    private long cdOffset = 0;

    /**
     * Length of central directory.
     */
    private long cdLength = 0;

    /**
     * Helper, a 0 as ZipShort.
     */
    private static final byte[] ZERO = {0, 0};

    /**
     * Helper, a 0 as ZipLong.
     */
    private static final byte[] LZERO = {0, 0, 0, 0};

    private static final byte[] ONE = ZipLong.getBytes(1L);

    /**
     * Holds the offsets of the LFH starts for each entry.
     */
    private final Map<ZipArchiveEntry, Long> offsets =
        new HashMap<ZipArchiveEntry, Long>();

    /**
     * The encoding to use for filenames and the file comment.
     *
     * <p>For a list of possible values see <a
     * href="http://java.sun.com/j2se/1.5.0/docs/guide/intl/encoding.doc.html">http://java.sun.com/j2se/1.5.0/docs/guide/intl/encoding.doc.html</a>.
     * Defaults to UTF-8.</p>
     */
    private String encoding = DEFAULT_ENCODING;

    /**
     * The zip encoding to use for filenames and the file comment.
     *
     * This field is of internal use and will be set in {@link
     * #setEncoding(String)}.
     */
    private ZipEncoding zipEncoding =
        ZipEncodingHelper.getZipEncoding(DEFAULT_ENCODING);


    /**
     * This Deflater object is used for output.
     *
     */
    protected final Deflater def;
    /**
     * Optional random access output.
     */
    private final RandomAccessFile raf;

    private final OutputStream out;

    /**
     * whether to use the general purpose bit flag when writing UTF-8
     * filenames or not.
     */
    private boolean useUTF8Flag = true;

    /**
     * Whether to encode non-encodable file names as UTF-8.
     */
    private boolean fallbackToUTF8 = false;

    /**
     * whether to create UnicodePathExtraField-s for each entry.
     */
    private UnicodeExtraFieldPolicy createUnicodeExtraFields = UnicodeExtraFieldPolicy.NEVER;

    /**
     * Whether anything inside this archive has used a ZIP64 feature.
     *
     * @since 1.3
     */
    private boolean hasUsedZip64 = false;

    private Zip64Mode zip64Mode = Zip64Mode.AsNeeded;

    private final byte[] copyBuffer = new byte[32768];
    private final Calendar calendarInstance = Calendar.getInstance();

    /**
     * Creates a new ZIP OutputStream filtering the underlying stream.
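     *
     * <p>A minimal usage sketch; the entry name and the {@code someStream}
     * and {@code data} variables are placeholders, not part of this class:</p>
     *
     * <pre>{@code
     * ZipArchiveOutputStream zos = new ZipArchiveOutputStream(someStream);
     * ZipArchiveEntry e = new ZipArchiveEntry("hello.txt");
     * zos.putArchiveEntry(e);
     * zos.write(data);            // the entry's content
     * zos.closeArchiveEntry();
     * zos.finish();
     * zos.close();
     * }</pre>
     *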
     * @param out the output stream to zip
     */
    public ZipArchiveOutputStream(OutputStream out) {
        this.out = out;
        this.raf = null;
        def = new Deflater(level, true);
        streamCompressor = StreamCompressor.create(out, def);
    }

    /**
     * Creates a new ZIP OutputStream writing to a File. Will use
     * random access if possible.
     * @param file the file to zip to
     * @throws IOException on error
     */
    public ZipArchiveOutputStream(File file) throws IOException {
        OutputStream o = null;
        RandomAccessFile _raf = null;
        try {
            _raf = new RandomAccessFile(file, "rw");
            _raf.setLength(0);
        } catch (IOException e) {
            IOUtils.closeQuietly(_raf);
            _raf = null;
            o = new FileOutputStream(file);
        }
        def = new Deflater(level, true);
        streamCompressor = StreamCompressor.create(_raf, def);
        out = o;
        raf = _raf;
    }

    /**
     * This method indicates whether this archive is writing to a
     * seekable stream (i.e., to a random access file).
     *
     * <p>For seekable streams, you don't need to calculate the CRC or
     * uncompressed size for {@link #STORED} entries before
     * invoking {@link #putArchiveEntry(ArchiveEntry)}.
     * @return true if seekable
     */
    public boolean isSeekable() {
        return raf != null;
    }

    /**
     * The encoding to use for filenames and the file comment.
     *
     * <p>For a list of possible values see <a
     * href="http://java.sun.com/j2se/1.5.0/docs/guide/intl/encoding.doc.html">http://java.sun.com/j2se/1.5.0/docs/guide/intl/encoding.doc.html</a>.
     * Defaults to UTF-8.</p>
     * @param encoding the encoding to use for file names, use null
     * for the platform's default encoding
     */
    public void setEncoding(final String encoding) {
        this.encoding = encoding;
        this.zipEncoding = ZipEncodingHelper.getZipEncoding(encoding);
        if (useUTF8Flag && !ZipEncodingHelper.isUTF8(encoding)) {
            useUTF8Flag = false;
        }
    }

    /**
     * The encoding to use for filenames and the file comment.
     *
     * @return null if using the platform's default character encoding.
     */
    public String getEncoding() {
        return encoding;
    }

    /**
     * Whether to set the language encoding flag if the file name
     * encoding is UTF-8.
     *
     * <p>Defaults to true.</p>
     *
     * @param b whether to set the language encoding flag if the file
     * name encoding is UTF-8
     */
    public void setUseLanguageEncodingFlag(boolean b) {
        useUTF8Flag = b && ZipEncodingHelper.isUTF8(encoding);
    }

    /**
     * Whether to create Unicode Extra Fields.
     *
     * <p>Defaults to NEVER.</p>
     *
     * @param b whether to create Unicode Extra Fields.
     */
    public void setCreateUnicodeExtraFields(UnicodeExtraFieldPolicy b) {
        createUnicodeExtraFields = b;
    }

    /**
     * Whether to fall back to UTF-8 and the language encoding flag if
     * the file name cannot be encoded using the specified encoding.
     *
     * <p>Defaults to false.</p>
     *
     * @param b whether to fall back to UTF-8 and the language encoding
     * flag if the file name cannot be encoded using the specified
     * encoding.
     */
    public void setFallbackToUTF8(boolean b) {
        fallbackToUTF8 = b;
    }

    /**
     * Whether Zip64 extensions will be used.
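     *
     * <p>A short sketch of the trade-off, assuming a stream {@code zos} and an
     * entry {@code hugeEntry} whose known uncompressed size exceeds 4 GB (both
     * placeholders): with {@link Zip64Mode#Never Never} the archive stays
     * readable for pre-Zip64 tools, but writing such an entry fails:</p>
     *
     * <pre>{@code
     * zos.setUseZip64(Zip64Mode.Never);
     * try {
     *     zos.putArchiveEntry(hugeEntry);
     * } catch (Zip64RequiredException ex) {
     *     // the entry needs Zip64 extensions but they have been disabled
     * }
     * }</pre>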
     *
     * <p>When setting the mode to {@link Zip64Mode#Never Never},
     * {@link #putArchiveEntry}, {@link #closeArchiveEntry}, {@link
     * #finish} or {@link #close} may throw a {@link
     * Zip64RequiredException} if the entry's size or the total size
     * of the archive exceeds 4GB or there are more than 65536 entries
     * inside the archive. Any archive created in this mode will be
     * readable by implementations that don't support Zip64.</p>
     *
     * <p>When setting the mode to {@link Zip64Mode#Always Always},
     * Zip64 extensions will be used for all entries. Any archive
     * created in this mode may be unreadable by implementations that
     * don't support Zip64 even if all its contents would be.</p>
     *
     * <p>When setting the mode to {@link Zip64Mode#AsNeeded
     * AsNeeded}, Zip64 extensions will transparently be used for
     * those entries that require them. This mode can only be used if
     * the uncompressed size of the {@link ZipArchiveEntry} is known
     * when calling {@link #putArchiveEntry} or the archive is written
     * to a seekable output (i.e. you have used the {@link
     * #ZipArchiveOutputStream(java.io.File) File-arg constructor}) -
     * this mode is not valid when the output stream is not seekable
     * and the uncompressed size is unknown when {@link
     * #putArchiveEntry} is called.</p>
     *
     * <p>If no entry inside the resulting archive requires Zip64
     * extensions then {@link Zip64Mode#Never Never} will create the
     * smallest archive. {@link Zip64Mode#AsNeeded AsNeeded} will
     * create a slightly bigger archive if the uncompressed size of
     * any entry has initially been unknown and create an archive
     * identical to {@link Zip64Mode#Never Never} otherwise. {@link
     * Zip64Mode#Always Always} will create an archive that is at
     * least 24 bytes per entry bigger than the one {@link
     * Zip64Mode#Never Never} would create.</p>
     *
     * <p>Defaults to {@link Zip64Mode#AsNeeded AsNeeded} unless
     * {@link #putArchiveEntry} is called with an entry of unknown
     * size and data is written to a non-seekable stream - in this
     * case the default is {@link Zip64Mode#Never Never}.</p>
     *
     * @since 1.3
     * @param mode Whether Zip64 extensions will be used.
     */
    public void setUseZip64(Zip64Mode mode) {
        zip64Mode = mode;
    }

    /**
     * {@inheritDoc}
     * @throws Zip64RequiredException if the archive's size exceeds 4
     * GByte or there are more than 65535 entries inside the archive
     * and {@link #setUseZip64} is {@link Zip64Mode#Never}.
     */
    @Override
    public void finish() throws IOException {
        if (finished) {
            throw new IOException("This archive has already been finished");
        }

        if (entry != null) {
            throw new IOException("This archive contains unclosed entries.");
        }

        cdOffset = streamCompressor.getTotalBytesWritten();
        writeCentralDirectoryInChunks();

        cdLength = streamCompressor.getTotalBytesWritten() - cdOffset;
        writeZip64CentralDirectory();
        writeCentralDirectoryEnd();
        offsets.clear();
        entries.clear();
        streamCompressor.close();
        finished = true;
    }

    private void writeCentralDirectoryInChunks() throws IOException {
        int NUM_PER_WRITE = 1000;
        ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(70 * NUM_PER_WRITE);
        int count = 0;
        for (ZipArchiveEntry ze : entries) {
            byteArrayOutputStream.write(createCentralFileHeader(ze));
            if (++count > NUM_PER_WRITE) {
                writeCounted(byteArrayOutputStream.toByteArray());
                byteArrayOutputStream.reset();
                count = 0;
            }
        }
        writeCounted(byteArrayOutputStream.toByteArray());
    }

    /**
     * Writes all necessary data for this entry.
     * @throws IOException on error
     * @throws Zip64RequiredException if the entry's uncompressed or
     * compressed size exceeds 4 GByte and {@link #setUseZip64}
     * is {@link Zip64Mode#Never}.
     */
    @Override
    public void closeArchiveEntry() throws IOException {
        preClose();

        flushDeflater();

        long bytesWritten = streamCompressor.getTotalBytesWritten() - entry.dataStart;
        long realCrc = streamCompressor.getCrc32();
        entry.bytesRead = streamCompressor.getBytesRead();
        final Zip64Mode effectiveMode = getEffectiveZip64Mode(entry.entry);
        final boolean actuallyNeedsZip64 = handleSizesAndCrc(bytesWritten, realCrc, effectiveMode);
        closeEntry(actuallyNeedsZip64, false);
        streamCompressor.reset();
    }

    /**
     * Writes all necessary data for this entry.
     *
     * @throws IOException on error
     * @throws Zip64RequiredException if the entry's uncompressed or
     * compressed size exceeds 4 GByte and {@link #setUseZip64}
     * is {@link Zip64Mode#Never}.
     * @param phased true if this entry is the second phase of a 2-phase
     * zip creation; size, compressed size and crc are already known in
     * the ZipArchiveEntry
     */
    private void closeCopiedEntry(boolean phased) throws IOException {
        preClose();
        entry.bytesRead = entry.entry.getSize();
        Zip64Mode effectiveMode = getEffectiveZip64Mode(entry.entry);
        boolean actuallyNeedsZip64 = checkIfNeedsZip64(effectiveMode);
        closeEntry(actuallyNeedsZip64, phased);
    }

    private void closeEntry(boolean actuallyNeedsZip64, boolean phased) throws IOException {
        if (!phased && raf != null) {
            rewriteSizesAndCrc(actuallyNeedsZip64);
        }

        writeDataDescriptor(entry.entry);
        entry = null;
    }

    private void preClose() throws IOException {
        if (finished) {
            throw new IOException("Stream has already been finished");
        }

        if (entry == null) {
            throw new IOException("No current entry to close");
        }

        if (!entry.hasWritten) {
            write(EMPTY, 0, 0);
        }
    }

    /**
     * Adds an archive entry with a raw input stream.
     *
     * If crc, size and compressed size are supplied on the entry, these values will be used as-is.
     * Zip64 status is re-established based on the settings in this stream, and the supplied value
     * is ignored.
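     *
     * One possible source for such a raw stream is {@link ZipFile#getRawInputStream},
     * which allows copying an already-compressed entry without recompressing it.
     * A rough sketch; the {@code zos} stream, the source archive and the entry
     * name are placeholders:
     *
     * <pre>{@code
     * ZipFile source = new ZipFile("in.zip");
     * ZipArchiveEntry e = source.getEntry("some/entry");
     * zos.addRawArchiveEntry(e, source.getRawInputStream(e));
     * }</pre>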
     *
     * The entry is put and closed immediately.
     *
     * @param entry The archive entry to add
     * @param rawStream The raw input stream of a different entry. May be compressed/encrypted.
     * @throws IOException If copying fails
     */
    public void addRawArchiveEntry(ZipArchiveEntry entry, InputStream rawStream)
            throws IOException {
        ZipArchiveEntry ae = new ZipArchiveEntry(entry);
        if (hasZip64Extra(ae)) {
            // Will be re-added as required. This may make the file generated with this method
            // somewhat smaller than standard mode,
            // since standard mode is unable to remove the zip 64 header.
            ae.removeExtraField(Zip64ExtendedInformationExtraField.HEADER_ID);
        }
        boolean is2PhaseSource = ae.getCrc() != ZipArchiveEntry.CRC_UNKNOWN
                && ae.getSize() != ArchiveEntry.SIZE_UNKNOWN
                && ae.getCompressedSize() != ArchiveEntry.SIZE_UNKNOWN;
        putArchiveEntry(ae, is2PhaseSource);
        copyFromZipInputStream(rawStream);
        closeCopiedEntry(is2PhaseSource);
    }

    /**
     * Ensures all bytes sent to the deflater are written to the stream.
     */
    private void flushDeflater() throws IOException {
        if (entry.entry.getMethod() == DEFLATED) {
            streamCompressor.flushDeflater();
        }
    }

    /**
     * Ensures the current entry's size and CRC information is set to
     * the values just written, verifies it isn't too big in the
     * Zip64Mode.Never case and returns whether the entry would
     * require a Zip64 extra field.
     */
    private boolean handleSizesAndCrc(long bytesWritten, long crc,
                                      Zip64Mode effectiveMode)
        throws ZipException {
        if (entry.entry.getMethod() == DEFLATED) {
            /* It turns out def.getBytesRead() returns wrong values if
             * the size exceeds 4 GB on Java < Java7
            entry.entry.setSize(def.getBytesRead());
            */
            entry.entry.setSize(entry.bytesRead);
            entry.entry.setCompressedSize(bytesWritten);
            entry.entry.setCrc(crc);

        } else if (raf == null) {
            if (entry.entry.getCrc() != crc) {
                throw new ZipException("bad CRC checksum for entry "
                                       + entry.entry.getName() + ": "
                                       + Long.toHexString(entry.entry.getCrc())
                                       + " instead of "
                                       + Long.toHexString(crc));
            }

            if (entry.entry.getSize() != bytesWritten) {
                throw new ZipException("bad size for entry "
                                       + entry.entry.getName() + ": "
                                       + entry.entry.getSize()
                                       + " instead of "
                                       + bytesWritten);
            }
        } else { /* method is STORED and we used RandomAccessFile */
            entry.entry.setSize(bytesWritten);
            entry.entry.setCompressedSize(bytesWritten);
            entry.entry.setCrc(crc);
        }

        return checkIfNeedsZip64(effectiveMode);
    }

    /**
     * Verifies the current entry isn't too big in the Zip64Mode.Never
     * case and returns whether the entry would require a Zip64 extra
     * field.
     */
    private boolean checkIfNeedsZip64(Zip64Mode effectiveMode)
            throws ZipException {
        final boolean actuallyNeedsZip64 = isZip64Required(entry.entry, effectiveMode);
        if (actuallyNeedsZip64 && effectiveMode == Zip64Mode.Never) {
            throw new Zip64RequiredException(Zip64RequiredException.getEntryTooBigMessage(entry.entry));
        }
        return actuallyNeedsZip64;
    }

    private boolean isZip64Required(ZipArchiveEntry entry1, Zip64Mode requestedMode) {
        return requestedMode == Zip64Mode.Always || isTooLageForZip32(entry1);
    }

    private boolean isTooLageForZip32(ZipArchiveEntry zipArchiveEntry) {
        return zipArchiveEntry.getSize() >= ZIP64_MAGIC || zipArchiveEntry.getCompressedSize() >= ZIP64_MAGIC;
    }

    /**
     * When using random access output, write the local file header
     * and potentially the ZIP64 extra containing the correct CRC and
     * compressed/uncompressed sizes.
     */
    private void rewriteSizesAndCrc(boolean actuallyNeedsZip64)
        throws IOException {
        long save = raf.getFilePointer();

        raf.seek(entry.localDataStart);
        writeOut(ZipLong.getBytes(entry.entry.getCrc()));
        if (!hasZip64Extra(entry.entry) || !actuallyNeedsZip64) {
            writeOut(ZipLong.getBytes(entry.entry.getCompressedSize()));
            writeOut(ZipLong.getBytes(entry.entry.getSize()));
        } else {
            writeOut(ZipLong.ZIP64_MAGIC.getBytes());
            writeOut(ZipLong.ZIP64_MAGIC.getBytes());
        }

        if (hasZip64Extra(entry.entry)) {
            ByteBuffer name = getName(entry.entry);
            int nameLen = name.limit() - name.position();
            // seek to ZIP64 extra, skip header and size information
            raf.seek(entry.localDataStart + 3 * WORD + 2 * SHORT
                     + nameLen + 2 * SHORT);
            // inside the ZIP64 extra uncompressed size comes
            // first, unlike the LFH, CD or data descriptor
            writeOut(ZipEightByteInteger.getBytes(entry.entry.getSize()));
            writeOut(ZipEightByteInteger.getBytes(entry.entry.getCompressedSize()));

            if (!actuallyNeedsZip64) {
                // do some cleanup:
                // * rewrite version needed to extract
                raf.seek(entry.localDataStart - 5 * SHORT);
                writeOut(ZipShort.getBytes(INITIAL_VERSION));

                // * remove ZIP64 extra so it doesn't get written
                //   to the central directory
                entry.entry.removeExtraField(Zip64ExtendedInformationExtraField
                                             .HEADER_ID);
                entry.entry.setExtra();

                // * reset hasUsedZip64 if it has been set because
                //   of this entry
                if (entry.causedUseOfZip64) {
                    hasUsedZip64 = false;
                }
            }
        }
        raf.seek(save);
    }

    /**
     * {@inheritDoc}
     * @throws ClassCastException if entry is not an instance of ZipArchiveEntry
     * @throws Zip64RequiredException if the entry's uncompressed or
     * compressed size is known to exceed 4 GByte and {@link #setUseZip64}
     * is {@link Zip64Mode#Never}.
     */
    @Override
    public void putArchiveEntry(ArchiveEntry archiveEntry) throws IOException {
        putArchiveEntry(archiveEntry, false);
    }

    /**
     * Writes the headers for an archive entry to the output stream.
     * The caller must then write the content to the stream and call
     * {@link #closeArchiveEntry()} to complete the process.
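     *
     * <p>For a {@link #STORED} entry written to a non-seekable stream, size
     * and CRC must already be set at this point; a rough sketch (the
     * {@code data} array and the {@code crc32Value} computed over it are
     * placeholders):</p>
     *
     * <pre>{@code
     * ZipArchiveEntry stored = new ZipArchiveEntry("raw.bin");
     * stored.setMethod(ZipArchiveOutputStream.STORED);
     * stored.setSize(data.length);
     * stored.setCrc(crc32Value);
     * putArchiveEntry(stored);
     * write(data, 0, data.length);
     * closeArchiveEntry();
     * }</pre>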
     *
     * @param archiveEntry The archiveEntry
     * @param phased If true, size, compressedSize and crc are required to be known up-front in the archiveEntry
     * @throws ClassCastException if entry is not an instance of ZipArchiveEntry
     * @throws Zip64RequiredException if the entry's uncompressed or
     * compressed size is known to exceed 4 GByte and {@link #setUseZip64}
     * is {@link Zip64Mode#Never}.
     */
    private void putArchiveEntry(ArchiveEntry archiveEntry, boolean phased) throws IOException {
        if (finished) {
            throw new IOException("Stream has already been finished");
        }

        if (entry != null) {
            closeArchiveEntry();
        }

        entry = new CurrentEntry((ZipArchiveEntry) archiveEntry);
        entries.add(entry.entry);

        setDefaults(entry.entry);

        final Zip64Mode effectiveMode = getEffectiveZip64Mode(entry.entry);
        validateSizeInformation(effectiveMode);

        if (shouldAddZip64Extra(entry.entry, effectiveMode)) {

            Zip64ExtendedInformationExtraField z64 = getZip64Extra(entry.entry);

            // just a placeholder, real data will be in data
            // descriptor or inserted later via RandomAccessFile
            ZipEightByteInteger size = ZipEightByteInteger.ZERO;
            ZipEightByteInteger compressedSize = ZipEightByteInteger.ZERO;
            if (phased) {
                size = new ZipEightByteInteger(entry.entry.getSize());
                compressedSize = new ZipEightByteInteger(entry.entry.getCompressedSize());
            } else if (entry.entry.getMethod() == STORED
                       && entry.entry.getSize() != ArchiveEntry.SIZE_UNKNOWN) {
                // actually, we already know the sizes
                size = new ZipEightByteInteger(entry.entry.getSize());
                compressedSize = size;
            }
            z64.setSize(size);
            z64.setCompressedSize(compressedSize);
            entry.entry.setExtra();
        }

        if (entry.entry.getMethod() == DEFLATED && hasCompressionLevelChanged) {
            def.setLevel(level);
            hasCompressionLevelChanged = false;
        }
        writeLocalFileHeader((ZipArchiveEntry) archiveEntry, phased);
    }

    /**
     * Provides default values for compression method and last
     * modification time.
     */
    private void setDefaults(ZipArchiveEntry entry) {
        if (entry.getMethod() == -1) { // not specified
            entry.setMethod(method);
        }

        if (entry.getTime() == -1) { // not specified
            entry.setTime(System.currentTimeMillis());
        }
    }

    /**
     * Throws an exception if the size is unknown for a stored entry
     * that is written to a non-seekable output or the entry is too
     * big to be written without Zip64 extra but the mode has been set
     * to Never.
     */
    private void validateSizeInformation(Zip64Mode effectiveMode)
        throws ZipException {
        // Size/CRC not required if RandomAccessFile is used
        if (entry.entry.getMethod() == STORED && raf == null) {
            if (entry.entry.getSize() == ArchiveEntry.SIZE_UNKNOWN) {
                throw new ZipException("uncompressed size is required for"
                                       + " STORED method when not writing to a"
                                       + " file");
            }
            if (entry.entry.getCrc() == ZipArchiveEntry.CRC_UNKNOWN) {
                throw new ZipException("crc checksum is required for STORED"
                                       + " method when not writing to a file");
            }
            entry.entry.setCompressedSize(entry.entry.getSize());
        }

        if ((entry.entry.getSize() >= ZIP64_MAGIC
             || entry.entry.getCompressedSize() >= ZIP64_MAGIC)
            && effectiveMode == Zip64Mode.Never) {
            throw new Zip64RequiredException(Zip64RequiredException
                                             .getEntryTooBigMessage(entry.entry));
        }
    }

    /**
     * Whether to add a Zip64 extended information extra field to the
     * local file header.
     *
     * <p>Returns true if</p>
     *
     * <ul>
     * <li>mode is Always</li>
     * <li>or we already know it is going to be needed</li>
     * <li>or the size is unknown and we can ensure it won't hurt
     * other implementations if we add it (i.e. we can erase its
     * usage)</li>
     * </ul>
     */
    private boolean shouldAddZip64Extra(ZipArchiveEntry entry, Zip64Mode mode) {
        return mode == Zip64Mode.Always
            || entry.getSize() >= ZIP64_MAGIC
            || entry.getCompressedSize() >= ZIP64_MAGIC
            || (entry.getSize() == ArchiveEntry.SIZE_UNKNOWN
                && raf != null && mode != Zip64Mode.Never);
    }

    /**
     * Set the file comment.
     * @param comment the comment
     */
    public void setComment(String comment) {
        this.comment = comment;
    }

    /**
     * Sets the compression level for subsequent entries.
     *
     * <p>Default is Deflater.DEFAULT_COMPRESSION.</p>
     * @param level the compression level.
     * @throws IllegalArgumentException if an invalid compression
     * level is specified.
     */
    public void setLevel(int level) {
        if (level < Deflater.DEFAULT_COMPRESSION
            || level > Deflater.BEST_COMPRESSION) {
            throw new IllegalArgumentException("Invalid compression level: "
                                               + level);
        }
        hasCompressionLevelChanged = (this.level != level);
        this.level = level;
    }

    /**
     * Sets the default compression method for subsequent entries.
     *
     * <p>Default is DEFLATED.</p>
     * @param method an <code>int</code> from java.util.zip.ZipEntry
     */
    public void setMethod(int method) {
        this.method = method;
    }

    /**
     * Whether this stream is able to write the given entry.
     *
     * <p>May return false if it is set up to use encryption or a
     * compression method that hasn't been implemented yet.</p>
     * @since 1.1
     */
    @Override
    public boolean canWriteEntryData(ArchiveEntry ae) {
        if (ae instanceof ZipArchiveEntry) {
            ZipArchiveEntry zae = (ZipArchiveEntry) ae;
            return zae.getMethod() != ZipMethod.IMPLODING.getCode()
                && zae.getMethod() != ZipMethod.UNSHRINKING.getCode()
                && ZipUtil.canHandleEntryData(zae);
        }
        return false;
    }

    /**
     * Writes bytes to ZIP entry.
     * @param b the byte array to write
     * @param offset the start position to write from
     * @param length the number of bytes to write
     * @throws IOException on error
     */
    @Override
    public void write(byte[] b, int offset, int length) throws IOException {
        if (entry == null) {
            throw new IllegalStateException("No current entry");
        }
        ZipUtil.checkRequestedFeatures(entry.entry);
        long writtenThisTime = streamCompressor.write(b, offset, length, entry.entry.getMethod());
        count(writtenThisTime);
    }

    /**
     * Write bytes to output or random access file.
     * @param data the byte array to write
     * @throws IOException on error
     */
    private void writeCounted(byte[] data) throws IOException {
        streamCompressor.writeCounted(data);
    }

    private void copyFromZipInputStream(InputStream src) throws IOException {
        if (entry == null) {
            throw new IllegalStateException("No current entry");
        }
        ZipUtil.checkRequestedFeatures(entry.entry);
        entry.hasWritten = true;
        int length;
        while ((length = src.read(copyBuffer)) >= 0) {
            streamCompressor.writeCounted(copyBuffer, 0, length);
            count(length);
        }
    }

    /**
     * Closes this output stream and releases any system resources
     * associated with the stream.
     *
     * @exception IOException if an I/O error occurs.
     * @throws Zip64RequiredException if the archive's size exceeds 4
     * GByte or there are more than 65535 entries inside the archive
     * and {@link #setUseZip64} is {@link Zip64Mode#Never}.
     */
    @Override
    public void close() throws IOException {
        if (!finished) {
            finish();
        }
        destroy();
    }

    /**
     * Flushes this output stream and forces any buffered output bytes
     * to be written out to the stream.
     *
     * @exception IOException if an I/O error occurs.
     */
    @Override
    public void flush() throws IOException {
        if (out != null) {
            out.flush();
        }
    }

    /*
     * Various ZIP constants
     */
    /**
     * local file header signature
     */
    static final byte[] LFH_SIG = ZipLong.LFH_SIG.getBytes();
    /**
     * data descriptor signature
     */
    static final byte[] DD_SIG = ZipLong.DD_SIG.getBytes();
    /**
     * central file header signature
     */
    static final byte[] CFH_SIG = ZipLong.CFH_SIG.getBytes();
    /**
     * end of central dir signature
     */
    static final byte[] EOCD_SIG = ZipLong.getBytes(0X06054B50L);
    /**
     * ZIP64 end of central dir signature
     */
    static final byte[] ZIP64_EOCD_SIG = ZipLong.getBytes(0X06064B50L);
    /**
     * ZIP64 end of central dir locator signature
     */
    static final byte[] ZIP64_EOCD_LOC_SIG = ZipLong.getBytes(0X07064B50L);

    /**
     * Writes next block of compressed data to the output stream.
     * @throws IOException on error
     */
    protected final void deflate() throws IOException {
        streamCompressor.deflate();
    }

    /**
     * Writes the local file header entry
     * @param ze the entry to write
     * @throws IOException on error
     */
    protected void writeLocalFileHeader(ZipArchiveEntry ze) throws IOException {
        writeLocalFileHeader(ze, false);
    }

    private void writeLocalFileHeader(ZipArchiveEntry ze, boolean phased) throws IOException {
        boolean encodable = zipEncoding.canEncode(ze.getName());
        ByteBuffer name = getName(ze);

        if (createUnicodeExtraFields != UnicodeExtraFieldPolicy.NEVER) {
            addUnicodeExtraFields(ze, encodable, name);
        }

        final byte[] localHeader = createLocalFileHeader(ze, name, encodable, phased);
        long localHeaderStart = streamCompressor.getTotalBytesWritten();
        offsets.put(ze, localHeaderStart);
        entry.localDataStart = localHeaderStart + LFH_CRC_OFFSET; // At crc offset
        writeCounted(localHeader);
        entry.dataStart = streamCompressor.getTotalBytesWritten();
    }


    private byte[] createLocalFileHeader(ZipArchiveEntry ze, ByteBuffer name, boolean encodable,
                                         boolean phased) {
        byte[] extra = ze.getLocalFileDataExtra();
        final int nameLen = name.limit() - name.position();
        int len = LFH_FILENAME_OFFSET + nameLen + extra.length;
        byte[] buf = new byte[len];

        System.arraycopy(LFH_SIG, 0, buf, LFH_SIG_OFFSET, WORD);

        // store method in local variable to prevent multiple method calls
        final int zipMethod = ze.getMethod();

        if (phased && !isZip64Required(entry.entry, zip64Mode)) {
            putShort(INITIAL_VERSION, buf, LFH_VERSION_NEEDED_OFFSET);
        } else {
            putShort(versionNeededToExtract(zipMethod, hasZip64Extra(ze)), buf, LFH_VERSION_NEEDED_OFFSET);
        }

        GeneralPurposeBit generalPurposeBit = getGeneralPurposeBits(zipMethod, !encodable && fallbackToUTF8);
        generalPurposeBit.encode(buf, LFH_GPB_OFFSET);

        // compression method
        putShort(zipMethod, buf, LFH_METHOD_OFFSET);

        ZipUtil.toDosTime(calendarInstance, ze.getTime(), buf, LFH_TIME_OFFSET);

        // CRC
        if (phased) {
            putLong(ze.getCrc(), buf, LFH_CRC_OFFSET);
        } else if (zipMethod == DEFLATED || raf != null) {
            System.arraycopy(LZERO, 0, buf, LFH_CRC_OFFSET, WORD);
        } else {
            putLong(ze.getCrc(), buf, LFH_CRC_OFFSET);
        }

        // compressed length
        // uncompressed length
        if (hasZip64Extra(entry.entry)) {
            // point to ZIP64 extended information extra field for
            // sizes, may get rewritten once sizes are known if
            // stream is seekable
            ZipLong.ZIP64_MAGIC.putLong(buf, LFH_COMPRESSED_SIZE_OFFSET);
            ZipLong.ZIP64_MAGIC.putLong(buf, LFH_ORIGINAL_SIZE_OFFSET);
        } else if (phased) {
            putLong(ze.getCompressedSize(), buf, LFH_COMPRESSED_SIZE_OFFSET);
            putLong(ze.getSize(), buf, LFH_ORIGINAL_SIZE_OFFSET);
        } else if (zipMethod == DEFLATED || raf != null) {
            System.arraycopy(LZERO, 0, buf, LFH_COMPRESSED_SIZE_OFFSET, WORD);
            System.arraycopy(LZERO, 0, buf, LFH_ORIGINAL_SIZE_OFFSET, WORD);
        } else { // Stored
            putLong(ze.getSize(), buf, LFH_COMPRESSED_SIZE_OFFSET);
            putLong(ze.getSize(), buf, LFH_ORIGINAL_SIZE_OFFSET);
        }
        // file name length
        putShort(nameLen, buf, LFH_FILENAME_LENGTH_OFFSET);

        // extra field length
        putShort(extra.length, buf, LFH_EXTRA_LENGTH_OFFSET);

        // file name
        System.arraycopy(name.array(), name.arrayOffset(), buf, LFH_FILENAME_OFFSET, nameLen);

        System.arraycopy(extra, 0, buf, LFH_FILENAME_OFFSET + nameLen, extra.length);
        return buf;
    }


    /**
     * Adds UnicodeExtra fields for name and file comment if mode is
     * ALWAYS or the data cannot be encoded using the configured
     * encoding.
     */
    private void addUnicodeExtraFields(ZipArchiveEntry ze, boolean encodable,
                                       ByteBuffer name)
        throws IOException {
        if (createUnicodeExtraFields == UnicodeExtraFieldPolicy.ALWAYS
            || !encodable) {
            ze.addExtraField(new UnicodePathExtraField(ze.getName(),
                                                       name.array(),
                                                       name.arrayOffset(),
                                                       name.limit()
                                                       - name.position()));
        }

        String comm = ze.getComment();
        if (comm != null && !"".equals(comm)) {

            boolean commentEncodable = zipEncoding.canEncode(comm);

            if (createUnicodeExtraFields == UnicodeExtraFieldPolicy.ALWAYS
                || !commentEncodable) {
                ByteBuffer commentB = getEntryEncoding(ze).encode(comm);
                ze.addExtraField(new UnicodeCommentExtraField(comm,
                                                              commentB.array(),
                                                              commentB.arrayOffset(),
                                                              commentB.limit()
                                                              - commentB.position())
                                 );
            }
        }
    }

    /**
     * Writes the data descriptor entry.
     * @param ze the entry to write
     * @throws IOException on error
     */
    protected void writeDataDescriptor(ZipArchiveEntry ze) throws IOException {
        if (ze.getMethod() != DEFLATED || raf != null) {
            return;
        }
        writeCounted(DD_SIG);
        writeCounted(ZipLong.getBytes(ze.getCrc()));
        if (!hasZip64Extra(ze)) {
            writeCounted(ZipLong.getBytes(ze.getCompressedSize()));
            writeCounted(ZipLong.getBytes(ze.getSize()));
        } else {
            writeCounted(ZipEightByteInteger.getBytes(ze.getCompressedSize()));
            writeCounted(ZipEightByteInteger.getBytes(ze.getSize()));
        }
    }

    /**
     * Writes the central file header entry.
     * @param ze the entry to write
     * @throws IOException on error
     * @throws Zip64RequiredException if the archive's size exceeds 4
     * GByte and {@link #setUseZip64} is {@link
     * Zip64Mode#Never}.
     */
    protected void writeCentralFileHeader(ZipArchiveEntry ze) throws IOException {
        byte[] centralFileHeader = createCentralFileHeader(ze);
        writeCounted(centralFileHeader);
    }

    private byte[] createCentralFileHeader(ZipArchiveEntry ze) throws IOException {

        final long lfhOffset = offsets.get(ze);
        final boolean needsZip64Extra = hasZip64Extra(ze)
                || ze.getCompressedSize() >= ZIP64_MAGIC
                || ze.getSize() >= ZIP64_MAGIC
                || lfhOffset >= ZIP64_MAGIC;

        if (needsZip64Extra && zip64Mode == Zip64Mode.Never) {
            // must be the offset that is too big, otherwise an
            // exception would have been thrown in putArchiveEntry or
            // closeArchiveEntry
            throw new Zip64RequiredException(Zip64RequiredException
                                             .ARCHIVE_TOO_BIG_MESSAGE);
        }


        handleZip64Extra(ze, lfhOffset, needsZip64Extra);

        return createCentralFileHeader(ze, getName(ze), lfhOffset, needsZip64Extra);
    }

    /**
     * Creates the central file header entry.
     * @param ze the entry to write
     * @param name The encoded name
     * @param lfhOffset Local file header offset for this file
     * @throws IOException on error
     */
    private byte[] createCentralFileHeader(ZipArchiveEntry ze, ByteBuffer name, long lfhOffset,
                                           boolean needsZip64Extra) throws IOException {
        byte[] extra = ze.getCentralDirectoryExtra();

        // file comment length
        String comm = ze.getComment();
        if (comm == null) {
            comm = "";
        }

        ByteBuffer commentB = getEntryEncoding(ze).encode(comm);
        final int nameLen = name.limit() - name.position();
        final int commentLen = commentB.limit() - commentB.position();
        int len = CFH_FILENAME_OFFSET + nameLen + extra.length + commentLen;
        byte[] buf = new byte[len];

        System.arraycopy(CFH_SIG, 0, buf, CFH_SIG_OFFSET, WORD);

        // version made by
        // CheckStyle:MagicNumber OFF
        putShort((ze.getPlatform() << 8) | (!hasUsedZip64 ? DATA_DESCRIPTOR_MIN_VERSION : ZIP64_MIN_VERSION),
                 buf, CFH_VERSION_MADE_BY_OFFSET);

        final int zipMethod = ze.getMethod();
        final boolean encodable = zipEncoding.canEncode(ze.getName());
        putShort(versionNeededToExtract(zipMethod, needsZip64Extra), buf, CFH_VERSION_NEEDED_OFFSET);
        getGeneralPurposeBits(zipMethod, !encodable && fallbackToUTF8).encode(buf, CFH_GPB_OFFSET);

        // compression method
        putShort(zipMethod, buf, CFH_METHOD_OFFSET);


        // last mod. time and date
        ZipUtil.toDosTime(calendarInstance, ze.getTime(), buf, CFH_TIME_OFFSET);

        // CRC
        // compressed length
        // uncompressed length
        putLong(ze.getCrc(), buf, CFH_CRC_OFFSET);
        if (ze.getCompressedSize() >= ZIP64_MAGIC
                || ze.getSize() >= ZIP64_MAGIC) {
            ZipLong.ZIP64_MAGIC.putLong(buf, CFH_COMPRESSED_SIZE_OFFSET);
            ZipLong.ZIP64_MAGIC.putLong(buf, CFH_ORIGINAL_SIZE_OFFSET);
        } else {
            putLong(ze.getCompressedSize(), buf, CFH_COMPRESSED_SIZE_OFFSET);
            putLong(ze.getSize(), buf, CFH_ORIGINAL_SIZE_OFFSET);
        }

        putShort(nameLen, buf, CFH_FILENAME_LENGTH_OFFSET);

        // extra field length
        putShort(extra.length, buf, CFH_EXTRA_LENGTH_OFFSET);

        putShort(commentLen, buf, CFH_COMMENT_LENGTH_OFFSET);

        // disk number start
        System.arraycopy(ZERO, 0, buf, CFH_DISK_NUMBER_OFFSET, SHORT);

        // internal file attributes
        putShort(ze.getInternalAttributes(), buf, CFH_INTERNAL_ATTRIBUTES_OFFSET);

        // external file attributes
        putLong(ze.getExternalAttributes(), buf, CFH_EXTERNAL_ATTRIBUTES_OFFSET);

        // relative offset of LFH
        putLong(Math.min(lfhOffset, ZIP64_MAGIC), buf, CFH_LFH_OFFSET);

        // file name
        System.arraycopy(name.array(), name.arrayOffset(), buf, CFH_FILENAME_OFFSET, nameLen);

        int extraStart = CFH_FILENAME_OFFSET + nameLen;
        System.arraycopy(extra, 0, buf, extraStart, extra.length);

        int commentStart = extraStart + extra.length;

        // file comment
        System.arraycopy(commentB.array(), commentB.arrayOffset(), buf, commentStart, commentLen);
        return buf;
    }

    /**
     * If the entry needs Zip64 extra information inside the central
     * directory then configure its data.
     */
    private void handleZip64Extra(ZipArchiveEntry ze, long lfhOffset,
                                  boolean needsZip64Extra) {
        if (needsZip64Extra) {
            Zip64ExtendedInformationExtraField z64 = getZip64Extra(ze);
            if (ze.getCompressedSize() >= ZIP64_MAGIC
                || ze.getSize() >= ZIP64_MAGIC) {
                z64.setCompressedSize(new ZipEightByteInteger(ze.getCompressedSize()));
                z64.setSize(new ZipEightByteInteger(ze.getSize()));
            } else {
                // reset value that may have been set for LFH
                z64.setCompressedSize(null);
                z64.setSize(null);
            }
            if (lfhOffset >= ZIP64_MAGIC) {
                z64.setRelativeHeaderOffset(new ZipEightByteInteger(lfhOffset));
            }
            ze.setExtra();
        }
    }

    /**
     * Writes the "End of central dir record".
     * @throws IOException on error
     * @throws Zip64RequiredException if the archive's size exceeds 4
     * GByte or there are more than 65535 entries inside the archive
     * and {@link #setUseZip64} is {@link Zip64Mode#Never}.
     */
    protected void writeCentralDirectoryEnd() throws IOException {
        writeCounted(EOCD_SIG);

        // disk numbers
        writeCounted(ZERO);
        writeCounted(ZERO);

        // number of entries
        int numberOfEntries = entries.size();
        if (numberOfEntries > ZIP64_MAGIC_SHORT
            && zip64Mode == Zip64Mode.Never) {
            throw new Zip64RequiredException(Zip64RequiredException
                                             .TOO_MANY_ENTRIES_MESSAGE);
        }
        if (cdOffset > ZIP64_MAGIC && zip64Mode == Zip64Mode.Never) {
            throw new Zip64RequiredException(Zip64RequiredException
                                             .ARCHIVE_TOO_BIG_MESSAGE);
        }

        byte[] num = ZipShort.getBytes(Math.min(numberOfEntries,
                                                ZIP64_MAGIC_SHORT));
        writeCounted(num);
        writeCounted(num);

        // length and location of CD
        writeCounted(ZipLong.getBytes(Math.min(cdLength, ZIP64_MAGIC)));
        writeCounted(ZipLong.getBytes(Math.min(cdOffset, ZIP64_MAGIC)));

        // ZIP file comment
        ByteBuffer data = this.zipEncoding.encode(comment);
        int dataLen = data.limit() - data.position();
        writeCounted(ZipShort.getBytes(dataLen));
        streamCompressor.writeCounted(data.array(), data.arrayOffset(), dataLen);
    }

    /**
     * Writes the "ZIP64 End of central dir record" and
     * "ZIP64 End of central dir locator".
     * @throws IOException on error
     * @since 1.3
     */
    protected void writeZip64CentralDirectory() throws IOException {
        if (zip64Mode == Zip64Mode.Never) {
            return;
        }

        if (!hasUsedZip64
            && (cdOffset >= ZIP64_MAGIC || cdLength >= ZIP64_MAGIC
                || entries.size() >= ZIP64_MAGIC_SHORT)) {
            // actually "will use"
            hasUsedZip64 = true;
        }

        if (!hasUsedZip64) {
            return;
        }

        long offset = streamCompressor.getTotalBytesWritten();

        writeOut(ZIP64_EOCD_SIG);
        // size, we don't have any variable length as we don't support
        // the extensible data sector, yet
        writeOut(ZipEightByteInteger
                 .getBytes(SHORT   /* version made by */
                           + SHORT /* version needed to extract */
                           + WORD  /* disk number */
                           + WORD  /* disk with central directory */
                           + DWORD /* number of entries in CD on this disk */
                           + DWORD /* total number of entries */
                           + DWORD /* size of CD */
                           + DWORD /* offset of CD */
                           ));

        // version made by and version needed to extract
        writeOut(ZipShort.getBytes(ZIP64_MIN_VERSION));
        writeOut(ZipShort.getBytes(ZIP64_MIN_VERSION));

        // disk numbers - four bytes this time
        writeOut(LZERO);
        writeOut(LZERO);

        // number of entries
        byte[] num = ZipEightByteInteger.getBytes(entries.size());
        writeOut(num);
        writeOut(num);

        // length and location of CD
        writeOut(ZipEightByteInteger.getBytes(cdLength));
        writeOut(ZipEightByteInteger.getBytes(cdOffset));

        // no "zip64 extensible data sector" for now

        // and now the "ZIP64 end of central directory locator"
        writeOut(ZIP64_EOCD_LOC_SIG);

        // disk number holding the ZIP64 EOCD record
        writeOut(LZERO);
        // relative offset of ZIP64 EOCD record
        writeOut(ZipEightByteInteger.getBytes(offset));
        // total number of disks
        writeOut(ONE);
    }

    /**
     * Write bytes to output or random access file.
     * @param data the byte array to write
     * @throws IOException on error
     */
    protected final void writeOut(byte[] data) throws IOException {
        streamCompressor.writeOut(data, 0, data.length);
    }


    /**
     * Write bytes to output or random access file.
     * @param data the byte array to write
     * @param offset the start position to write from
     * @param length the number of bytes to write
     * @throws IOException on error
     */
    protected final void writeOut(byte[] data, int offset, int length)
        throws IOException {
        streamCompressor.writeOut(data, offset, length);
    }


    private GeneralPurposeBit getGeneralPurposeBits(final int zipMethod, final boolean utfFallback) {
        GeneralPurposeBit b = new GeneralPurposeBit();
        b.useUTF8ForNames(useUTF8Flag || utfFallback);
        if (isDeflatedToOutputStream(zipMethod)) {
            b.useDataDescriptor(true);
        }
        return b;
    }

    private int versionNeededToExtract(final int zipMethod, final boolean zip64) {
        if (zip64) {
            return ZIP64_MIN_VERSION;
        }
        // requires version 2 as we are going to store length info
        // in the data descriptor
        return (isDeflatedToOutputStream(zipMethod)) ?
            DATA_DESCRIPTOR_MIN_VERSION :
            INITIAL_VERSION;
    }

    private boolean isDeflatedToOutputStream(int zipMethod) {
        return zipMethod == DEFLATED && raf == null;
    }


    /**
     * Creates a new zip entry taking some information from the given
     * file and using the provided name.
     *
     * <p>The name will be adjusted to end with a forward slash "/" if
     * the file is a directory. If the file is not a directory a
     * potential trailing forward slash will be stripped from the
     * entry name.</p>
     *
     * <p>Must not be used if the stream has already been closed.</p>
     */
    @Override
    public ArchiveEntry createArchiveEntry(File inputFile, String entryName)
        throws IOException {
        if (finished) {
            throw new IOException("Stream has already been finished");
        }
        return new ZipArchiveEntry(inputFile, entryName);
    }

    /**
     * Get the existing ZIP64 extended information extra field or
     * create a new one and add it to the entry.
     *
     * @since 1.3
     */
    private Zip64ExtendedInformationExtraField
        getZip64Extra(ZipArchiveEntry ze) {
        if (entry != null) {
            entry.causedUseOfZip64 = !hasUsedZip64;
        }
        hasUsedZip64 = true;
        Zip64ExtendedInformationExtraField z64 =
            (Zip64ExtendedInformationExtraField)
            ze.getExtraField(Zip64ExtendedInformationExtraField
                             .HEADER_ID);
        if (z64 == null) {
            /*
              System.err.println("Adding z64 for " + ze.getName()
              + ", method: " + ze.getMethod()
              + " (" + (ze.getMethod() == STORED) + ")"
              + ", raf: " + (raf != null));
            */
            z64 = new Zip64ExtendedInformationExtraField();
        }

        // even if the field is there already, make sure it is the first one
        ze.addAsFirstExtraField(z64);

        return z64;
    }

    /**
     * Is there a ZIP64 extended information extra field for the
     * entry?
     *
     * @since 1.3
     */
    private boolean hasZip64Extra(ZipArchiveEntry ze) {
        return ze.getExtraField(Zip64ExtendedInformationExtraField
                                .HEADER_ID)
            != null;
    }

    /**
     * If the mode is AsNeeded and the entry is a compressed entry of
     * unknown size that gets written to a non-seekable stream then
     * change the default to Never.
     *
     * @since 1.3
     */
    private Zip64Mode getEffectiveZip64Mode(ZipArchiveEntry ze) {
        if (zip64Mode != Zip64Mode.AsNeeded
            || raf != null
            || ze.getMethod() != DEFLATED
            || ze.getSize() != ArchiveEntry.SIZE_UNKNOWN) {
            return zip64Mode;
        }
        return Zip64Mode.Never;
    }

    private ZipEncoding getEntryEncoding(ZipArchiveEntry ze) {
        boolean encodable = zipEncoding.canEncode(ze.getName());
        return !encodable && fallbackToUTF8
            ? ZipEncodingHelper.UTF8_ZIP_ENCODING : zipEncoding;
    }

    private ByteBuffer getName(ZipArchiveEntry ze) throws IOException {
        return getEntryEncoding(ze).encode(ze.getName());
    }

    /**
     * Closes the underlying stream/file without finishing the
     * archive, the result will likely be a corrupt archive.
     *
     * <p>This method only exists to support tests that generate
     * corrupt archives so they can clean up any temporary files.</p>
     */
    void destroy() throws IOException {
        if (raf != null) {
            raf.close();
        }
        if (out != null) {
            out.close();
        }
    }

    /**
     * enum that represents the possible policies for creating Unicode
     * extra fields.
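     *
     * <p>Applied via {@link ZipArchiveOutputStream#setCreateUnicodeExtraFields};
     * a one-line sketch, assuming a stream {@code zos}:</p>
     *
     * <pre>{@code
     * zos.setCreateUnicodeExtraFields(UnicodeExtraFieldPolicy.NOT_ENCODEABLE);
     * }</pre>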
     */
    public static final class UnicodeExtraFieldPolicy {
        /**
         * Always create Unicode extra fields.
         */
        public static final UnicodeExtraFieldPolicy ALWAYS = new UnicodeExtraFieldPolicy("always");
        /**
         * Never create Unicode extra fields.
         */
        public static final UnicodeExtraFieldPolicy NEVER = new UnicodeExtraFieldPolicy("never");
        /**
         * Create Unicode extra fields for filenames that cannot be
         * encoded using the specified encoding.
         */
        public static final UnicodeExtraFieldPolicy NOT_ENCODEABLE =
            new UnicodeExtraFieldPolicy("not encodeable");

        private final String name;
        private UnicodeExtraFieldPolicy(String n) {
            name = n;
        }
        @Override
        public String toString() {
            return name;
        }
    }

    /**
     * Structure collecting information for the entry that is
     * currently being written.
     */
    private static final class CurrentEntry {
        private CurrentEntry(ZipArchiveEntry entry) {
            this.entry = entry;
        }
        /**
         * Current ZIP entry.
         */
        private final ZipArchiveEntry entry;
        /**
         * Offset of the CRC field within the local file header data
         * for the current entry.
         */
        private long localDataStart = 0;
        /**
         * Offset at which the current entry's actual data starts,
         * i.e. right after its local file header.
         */
        private long dataStart = 0;
        /**
         * Number of bytes read for the current entry (can't rely on
         * Deflater#getBytesRead) when using DEFLATED.
         */
        private long bytesRead = 0;
        /**
         * Whether current entry was the first one using ZIP64 features.
         */
        private boolean causedUseOfZip64 = false;
        /**
         * Whether write() has been called at all.
         *
         * <p>In order to create a valid archive {@link
         * #closeArchiveEntry closeArchiveEntry} will write an empty
         * array to get the CRC right if nothing has been written to
         * the stream at all.</p>
         */
        private boolean hasWritten;
    }

}