Skip to content

Commit 77379d7

Browse files
committed
remove breaking test
1 parent 2569a34 commit 77379d7

File tree

1 file changed

+0
-186
lines changed

1 file changed

+0
-186
lines changed

src/storage/mod.rs

Lines changed: 0 additions & 186 deletions
Original file line numberDiff line numberDiff line change
@@ -1456,192 +1456,6 @@ mod test {
14561456
assert_eq!(detected_mime, expected_mime);
14571457
}
14581458

1459-
#[tokio::test(flavor = "multi_thread")]
1460-
async fn test_recompress_just_check() -> Result<()> {
1461-
let env = TestEnvironment::with_config(
1462-
TestEnvironment::base_config()
1463-
.storage_backend(StorageKind::S3)
1464-
.build()?,
1465-
)
1466-
.await?;
1467-
1468-
let storage = env.async_storage();
1469-
1470-
const KRATE: &str = "test_crate";
1471-
let rid = env
1472-
.fake_release()
1473-
.await
1474-
.name(KRATE)
1475-
.version(V0_1)
1476-
.archive_storage(true)
1477-
.keywords(vec!["kw 1".into(), "kw 2".into()])
1478-
.create()
1479-
.await?;
1480-
1481-
// run the recompression logic
1482-
let mut conn = env.async_db().async_conn().await;
1483-
let (checked, recompressed) = storage
1484-
.recompress_index_files_in_bucket(&mut conn, None, None, None)
1485-
.await?;
1486-
assert_eq!(checked, 2);
1487-
assert_eq!(recompressed, 0);
1488-
1489-
assert!(
1490-
storage
1491-
.get(&rustdoc_archive_path(KRATE, &V0_1), usize::MAX)
1492-
.await
1493-
.is_ok()
1494-
);
1495-
assert!(
1496-
storage
1497-
.get(&source_archive_path(KRATE, &V0_1), usize::MAX)
1498-
.await
1499-
.is_ok()
1500-
);
1501-
1502-
// release-id-min = the target release id for the iterator
1503-
// (we start at the latest, and go down).
1504-
// So setting that "target" to rid.0 + 1 means we stop before we hit our only release.
1505-
let (checked, recompressed) = storage
1506-
.recompress_index_files_in_bucket(&mut conn, Some(ReleaseId(rid.0 + 1)), None, None)
1507-
.await?;
1508-
assert_eq!(checked, 0);
1509-
assert_eq!(recompressed, 0);
1510-
1511-
// release-id-max = where we start iterating the releases
1512-
// (we start at the max, and go down).
1513-
// So setting that "start" to rid.0 - 1 means we start behind our only release
1514-
let (checked, recompressed) = storage
1515-
.recompress_index_files_in_bucket(&mut conn, None, Some(ReleaseId(rid.0 - 1)), None)
1516-
.await?;
1517-
assert_eq!(checked, 0);
1518-
assert_eq!(recompressed, 0);
1519-
1520-
// setting min & max to the same value that is also our only release
1521-
// tests if we filter as inclusive range.
1522-
let (checked, recompressed) = storage
1523-
.recompress_index_files_in_bucket(&mut conn, Some(rid), Some(rid), None)
1524-
.await?;
1525-
assert_eq!(checked, 2);
1526-
assert_eq!(recompressed, 0);
1527-
1528-
Ok(())
1529-
}
1530-
1531-
#[tokio::test(flavor = "multi_thread")]
1532-
async fn test_recompress_index_files_in_bucket() -> Result<()> {
1533-
use std::io::Cursor;
1534-
use tokio::io;
1535-
1536-
let env = TestEnvironment::with_config(
1537-
TestEnvironment::base_config()
1538-
.storage_backend(StorageKind::S3)
1539-
.build()?,
1540-
)
1541-
.await?;
1542-
1543-
const CONTENT: &[u8] = b"Hello, world! Hello, world! Hello, world! Hello, world!";
1544-
let alg = Some(CompressionAlgorithm::Zstd);
1545-
1546-
use async_compression::tokio::write;
1547-
1548-
let broken_archive = {
1549-
// broken compression implementation, `.shutdown` missing.
1550-
let mut buf = Vec::new();
1551-
let mut enc = write::ZstdEncoder::new(&mut buf);
1552-
io::copy(&mut Cursor::new(CONTENT), &mut enc).await?;
1553-
// check if it's really broken, EOF missing
1554-
assert_ne!(buf.last_chunk::<3>().unwrap(), &ZSTD_EOF_BYTES);
1555-
buf
1556-
};
1557-
1558-
const KRATE: &str = "test_crate";
1559-
env.fake_release()
1560-
.await
1561-
.name(KRATE)
1562-
.version(V0_1)
1563-
.archive_storage(true)
1564-
.keywords(vec!["kw 1".into(), "kw 2".into()])
1565-
.create()
1566-
.await?;
1567-
1568-
let storage = env.async_storage();
1569-
// delete everything in storage created by the fake_release above
1570-
for p in &["rustdoc/", "sources/"] {
1571-
storage.delete_prefix(p).await?;
1572-
}
1573-
1574-
// use raw inner storage backend so we can fetch the compressed file without automatic
1575-
// decompression
1576-
let StorageBackend::S3(raw_storage) = &storage.backend else {
1577-
panic!("S3 backend set above");
1578-
};
1579-
1580-
let index_path = format!("{}.index", rustdoc_archive_path(KRATE, &V0_1));
1581-
1582-
// upload as-is to the storage, into the place of an archive index.
1583-
// `.store_inner` doesn't compress
1584-
storage
1585-
.store_inner(vec![Blob {
1586-
path: index_path.clone(),
1587-
mime: mime::APPLICATION_OCTET_STREAM,
1588-
date_updated: Utc::now(),
1589-
content: broken_archive.clone(),
1590-
compression: alg,
1591-
}])
1592-
.await?;
1593-
1594-
// validate how the old compressed blob looks like, even though we just uploaded it
1595-
let old_compressed_blob = raw_storage
1596-
.get_stream(&index_path, None)
1597-
.await?
1598-
.materialize(usize::MAX)
1599-
.await?;
1600-
assert_eq!(old_compressed_blob.compression, alg);
1601-
1602-
// try getting the decompressed broken blob via normal storage API.
1603-
// old async-compression can do this without choking.
1604-
assert_eq!(
1605-
CONTENT,
1606-
&storage.get(&index_path, usize::MAX).await?.content
1607-
);
1608-
1609-
// run the recompression logic
1610-
let mut conn = env.async_db().async_conn().await;
1611-
let (checked, recompressed) = storage
1612-
.recompress_index_files_in_bucket(&mut conn, None, None, None)
1613-
.await?;
1614-
assert_eq!(checked, 1);
1615-
assert_eq!(recompressed, 1);
1616-
1617-
let new_compressed_blob = raw_storage
1618-
.get_stream(&index_path, None)
1619-
.await?
1620-
.materialize(usize::MAX)
1621-
.await?;
1622-
assert_eq!(new_compressed_blob.compression, alg);
1623-
1624-
// after fixing, getting the decompressed blob via normal storage API still works
1625-
assert_eq!(
1626-
CONTENT,
1627-
&storage.get(&index_path, usize::MAX).await?.content
1628-
);
1629-
1630-
// after recompression the content length should be different, 3 bytes more for
1631-
// the zstd EOF
1632-
assert_eq!(
1633-
new_compressed_blob.content.len(),
1634-
old_compressed_blob.content.len() + ZSTD_EOF_BYTES.len()
1635-
);
1636-
1637-
assert_eq!(
1638-
[&old_compressed_blob.content[..], &ZSTD_EOF_BYTES].concat(),
1639-
new_compressed_blob.content
1640-
);
1641-
1642-
Ok(())
1643-
}
1644-
16451459
#[tokio::test(flavor = "multi_thread")]
16461460
async fn test_outdated_local_archive_index_gets_redownloaded() -> Result<()> {
16471461
use tokio::fs;

0 commit comments

Comments
 (0)