tikz-gallery-generator

Custom build of stapix for tikz.pablopie.xyz

Commit
4193a04e494e6fb5203a986666667d767b683839
Parent
0bf8476078c1f52bce5bac346906c479085b6855
Author
Pablo <pablo-pie@riseup.net>
Date

Reimplemented code for downsampling the thumbnails

Diffstats

2 files changed, 301 insertions, 162 deletions

Status Name Changes Insertions Deletions
Modified src/image.rs 2 files changed 294 143
Modified src/main.rs 2 files changed 7 19
diff --git a/src/image.rs b/src/image.rs
@@ -12,12 +12,14 @@
 //! Working with these crates directly also allows us the opportunity to
 //! streamline pixel format conversions. For example, when parsing JPEG we now
 //! feed the YUV pixel data directly to `libwebp` instead of first converting
-//! it to RGB and then converting it back to YUV (which is what `image` would
+//! it to RGB and then converting it back to YUV (which is what `image` would
 //! have done).
+#![allow(clippy::identity_op)]
+#![allow(clippy::needless_range_loop)]
 
 use std::{
   slice,
-  ptr,
+  mem::{self, MaybeUninit},
   io::{self, Write, Read, BufReader},
   path::Path,
   fs::File,
@@ -34,22 +36,31 @@ use libwebp_sys::WebPEncodingError;
 
 use crate::create_file;
 
-#[derive(Clone, Debug)]
+#[derive(Debug)]
 pub struct Image {
+  width:     usize,
+  height:    usize,
+
   pixels: PixelBuffer,
-  pub width:  u32,
-  pub height: u32,
 }
 
-#[derive(Clone, Debug)]
+#[derive(Debug)]
+#[allow(clippy::upper_case_acronyms)]
 enum PixelBuffer {
-  Yuv { y: Vec<u8>, u: Vec<u8>, v: Vec<u8>, uv_stride: u32, },
-  Grayscale { level: Vec<u8>, alpha: Option<Vec<u8>>, },
-  Rgb(Vec<u8>),
-  Rgba(Vec<u8>),
+  YCbCr {
+    y:  Vec<u8>,
+    cb: Vec<u8>,
+    cr: Vec<u8>,
+    a:  Option<Vec<u8>>,
+    uv_stride: usize,
+  },
+  BGRA(Vec<u8>),
 }
 
-pub fn parse_jpeg(path: &Path) -> Result<Image, ()> {
+/// Parses the Y Cb Cr data from a JPEG, downsampling to fit `target_height`
+/// along the way
+// NOTE: using an approximation of nearest neighbor for efficiency
+pub fn parse_jpeg(path: &Path, target_height: usize) -> Result<Image, ()> {
   let options = DecoderOptions::default()
     .jpeg_set_out_colorspace(ColorSpace::YCbCr);
 
@@ -64,58 +75,80 @@ pub fn parse_jpeg(path: &Path) -> Result {
     }
   };
 
+  // ==========================================================================
   let info = decoder.info().expect("should have already parsed JPEG header");
-
   let options = decoder.options();
 
-  let width  = info.width  as u32;
-  let height = info.height as u32;
+  let src_width  = info.width  as usize;
+  let src_height = info.height as usize;
+
+  let (width, height) = if src_height > target_height {
+    (src_width * target_height / src_height, target_height)
+  } else {
+    (src_width, src_height)
+  };
+
   let uv_width  = width.div_ceil(2);
   let uv_height = height.div_ceil(2);
 
-  assert_expected_pixels_len(
-    width as usize,
-    height as usize,
-    3,
-    pixels.len()
-  );
-  let y_len:  usize = (width * height) as _;
-  let uv_len: usize = (uv_width * uv_height) as _;
+  assert_expected_pixels_len(src_width, src_height, 3, pixels.len());
+  let y_len  = width * height;
+  let uv_len = uv_width * uv_height;
 
   let colorspace = options.jpeg_get_out_colorspace();
   debug_assert!(colorspace == ColorSpace::YCbCr,
                 "unexpected colorspace when parsing JPEG: {colorspace:?}");
 
-  let mut y = vec![0; y_len];
-  let mut u = vec![0; uv_len];
-  let mut v = vec![0; uv_len];
-
-  // TODO: [optmize]: can we optimize these loops?
-  for i in 0..y_len { y[i] = pixels[i*3]; }
-  for uv_x in 0..uv_width {
-    for uv_y in 0..uv_height {
-      let x = uv_x * 2;
-      let y = uv_y * 2;
-
-      let i: usize = (y * width + x) as _;
-      let j: usize = (uv_y * uv_width + uv_x) as _;
-
-      // NOTE: using an approximation of nearest neighbor for efficient
-      let u_val = pixels[i*3+1];
-      let v_val = pixels[i*3+2];
-      u[j] = u_val;
-      v[j] = v_val;
+  // ==========================================================================
+  let mut y  = vec![MaybeUninit::uninit(); y_len];
+  let mut cb = vec![MaybeUninit::uninit(); uv_len];
+  let mut cr = vec![MaybeUninit::uninit(); uv_len];
+
+  if height == target_height {
+    // TODO: [optimize]: can we use SIMD here?
+    for dst_i in 0..y_len {
+      let dst_x = dst_i % width;
+      let dst_y = dst_i / width;
+
+      let src_x = dst_x *  src_width / width;
+      let src_y = dst_y * src_height / height;
+
+      let src_i = src_y * src_width + src_x;
+      y[dst_i] = MaybeUninit::new(pixels[src_i*3]);
     }
+  } else {
+    for i in 0..y_len { y[i] = MaybeUninit::new(pixels[i*3]); }
   }
 
+  // TODO: [optimize]: can we use SIMD here?
+  for dst_i in 0..uv_len {
+    let dst_x = dst_i % uv_width;
+    let dst_y = dst_i / uv_width;
+
+    let src_x = dst_x * src_width  / uv_width;
+    let src_y = dst_y * src_height / uv_height;
+
+    let src_i = src_y * src_width + src_x;
+    cb[dst_i] = MaybeUninit::new(pixels[src_i*3+1]);
+    cr[dst_i] = MaybeUninit::new(pixels[src_i*3+2]);
+  }
+
+  // ==========================================================================
+  let y  = unsafe { mem::transmute::<Vec<MaybeUninit<u8>>, Vec<u8>>(y) };
+  let cb = unsafe { mem::transmute::<Vec<MaybeUninit<u8>>, Vec<u8>>(cb) };
+  let cr = unsafe { mem::transmute::<Vec<MaybeUninit<u8>>, Vec<u8>>(cr) };
+
   Ok(Image {
-    pixels: PixelBuffer::Yuv { y, u, v, uv_stride: uv_width, },
     width,
     height,
+
+    pixels: PixelBuffer::YCbCr { y, cb, cr, a: None, uv_stride: uv_width, },
   })
 }
 
-pub fn parse_png(path: &Path) -> Result<Image, ()> {
+/// Parses the Y Cb Cr data from a PNG, downsampling to fit `target_height`
+/// along the way
+pub fn parse_png(path: &Path, target_height: usize) -> Result<Image, ()> {
   let options = DecoderOptions::default()
     .png_set_strip_to_8bit(true);
 
@@ -144,68 +177,217 @@ pub fn parse_png(path: &Path) -> Result {
     }
   };
 
-  let (width, height) = decoder
+  // ==========================================================================
+  let (src_width, src_height) = decoder
     .dimensions()
     .expect("should already have decoded PNG headers");
+
+  let (width, height) = if src_height > target_height {
+    (src_width * target_height / src_height, target_height)
+  } else {
+    (src_width, src_height)
+  };
+
+  // ==========================================================================
   let colorspace = decoder
     .colorspace()
     .expect("should already have decoded PNG headers");
 
-  match colorspace {
-    ColorSpace::RGB => {
-      assert_expected_pixels_len(width, height, 3, pixels.len());
+  debug_assert!(
+    matches!(
+      colorspace,
+      ColorSpace::RGB | ColorSpace::RGBA | ColorSpace::Luma | ColorSpace::LumaA
+    ),
+    "unexpected colorspace when parsing PNG: {colorspace:?}",
+  );
+
+  let has_alpha = matches!(colorspace, ColorSpace::RGBA | ColorSpace::LumaA);
+  let is_grayscale = matches!(
+    colorspace,
+    ColorSpace::Luma | ColorSpace::LumaA,
+  );
+
+  let mut bps = 1;
+  if !is_grayscale { bps += 2; }
+  if has_alpha     { bps += 1; }
+  assert_expected_pixels_len(src_width, src_height, bps, pixels.len());
 
-      Ok(Image {
-        pixels: PixelBuffer::Rgb(pixels),
-        width:  width  as u32,
-        height: height as u32,
-      })
+  // ==========================================================================
+  // handle grayscale input
+
+  struct GrayscaleData {
+    y: Vec<u8>,
+    a: Option<Vec<u8>>,
+    uv_stride: usize,
+    uv_len:    usize,
+  }
+
+  #[inline]
+  #[allow(clippy::too_many_arguments)]
+  fn parse_ya_channels(
+    width:     usize, height:     usize,
+    src_width: usize, src_height: usize,
+    pixels: Vec<u8>,
+    target_height: usize,
+    bps: usize, has_alpha: bool,
+  ) -> GrayscaleData {
+    let uv_width  = width.div_ceil(2);
+    let uv_height = height.div_ceil(2);
+    let y_len  =    width * height;
+    let uv_len = uv_width * uv_height;
+
+    if !has_alpha && height < target_height {
+      return GrayscaleData {
+        y: pixels,
+        a: None,
+        uv_stride: uv_width,
+        uv_len,
+      };
     }
-    ColorSpace::RGBA => {
-      assert_expected_pixels_len(width, height, 4, pixels.len());
-
-      Ok(Image {
-        pixels: PixelBuffer::Rgba(pixels),
-        width:  width  as u32,
-        height: height as u32,
-      })
+
+    if has_alpha && height < target_height {
+      let mut y = vec![MaybeUninit::uninit(); y_len];
+      let mut a = vec![MaybeUninit::uninit(); y_len];
+
+      // TODO: [optimize]: can we optimize this?
+      for i in 0..y_len {
+        y[i] = MaybeUninit::new(pixels[i*2+0]);
+        a[i] = MaybeUninit::new(pixels[i*2+1]);
+      }
+
+      let y = unsafe { mem::transmute::<Vec<MaybeUninit<u8>>, Vec<u8>>(y) };
+      let a = unsafe { mem::transmute::<Vec<MaybeUninit<u8>>, Vec<u8>>(a) };
+      return GrayscaleData { y, a: Some(a), uv_stride: uv_width, uv_len, };
+    }
+
+    let mut y = vec![MaybeUninit::uninit(); y_len];
+    for dst_i in 0..y_len {
+      let dst_x = dst_i % width;
+      let dst_y = dst_i / width;
+
+      let src_x = dst_x * src_width  / width;
+      let src_y = dst_y * src_height / height;
+
+      let src_i = src_y * src_width + src_x;
+      y[dst_i] = MaybeUninit::new(pixels[src_i*bps]);
     }
-    ColorSpace::Luma => {
-      assert_expected_pixels_len(width, height, 1, pixels.len());
-
-      Ok(Image {
-        pixels: PixelBuffer::Grayscale { level: pixels, alpha: None, },
-        width:  width  as u32,
-        height: height as u32,
-      })
+    let y = unsafe { mem::transmute::<Vec<MaybeUninit<u8>>, Vec<u8>>(y) };
+
+    if !has_alpha {
+      return GrayscaleData { y, a: None, uv_stride: uv_width, uv_len, };
     }
-    ColorSpace::LumaA => {
-      assert_expected_pixels_len(width, height, 2, pixels.len());
 
-      let len = pixels.len()/2;
-      let mut level = vec![0; len];
-      let mut alpha = vec![0; len];
+    let mut a = vec![MaybeUninit::uninit(); y_len];
+    for dst_i in 0..y_len {
+      let dst_x = dst_i % width;
+      let dst_y = dst_i / width;
 
-      #[allow(clippy::identity_op)]
+      let src_x = dst_x * src_width  / width;
+      let src_y = dst_y * src_height / height;
+
+      let src_i = src_y * src_width + src_x;
+      a[dst_i] = MaybeUninit::new(pixels[src_i*2+1]);
+    }
+    let a = unsafe { mem::transmute::<Vec<MaybeUninit<u8>>, Vec<u8>>(a) };
+
+    GrayscaleData { y, a: Some(a), uv_stride: uv_width, uv_len, }
+  }
+
+  if is_grayscale {
+    let GrayscaleData { y, a, uv_stride, uv_len, } = parse_ya_channels(
+      width,     height,
+      src_width, src_height,
+      pixels,
+      target_height,
+      bps, has_alpha,
+    );
+
+    return Ok(Image {
+      width,
+      height,
+
+      pixels: PixelBuffer::YCbCr {
+        y, a, cb: vec![128; uv_len], cr: vec![128; uv_len],
+        uv_stride,
+      },
+    });
+  }
+
+  // ==========================================================================
+  // handle RGB/RGBA input
+
+  #[inline]
+  #[allow(clippy::too_many_arguments)]
+  fn parse_bgra_channels(
+    width:     usize, height:     usize,
+    src_width: usize, src_height: usize,
+    pixels: &[u8],
+    target_height: usize,
+    bps: usize, has_alpha: bool,
+  ) -> Vec<MaybeUninit<u8>> {
+    let len = width * height;
+    let mut bgra_data = vec![MaybeUninit::uninit(); len * bps];
+
+    if height < target_height {
       for i in 0..len {
-        level[i] = pixels[i*2+0];
-        alpha[i] = pixels[i*2+1];
+        bgra_data[i*bps+0] = MaybeUninit::new(pixels[i*bps+2]);
+        bgra_data[i*bps+1] = MaybeUninit::new(pixels[i*bps+1]);
+        bgra_data[i*bps+2] = MaybeUninit::new(pixels[i*bps+0]);
       }
 
-      Ok(Image {
-        pixels: PixelBuffer::Grayscale { level, alpha: Some(alpha), },
-        width:  width  as u32,
-        height: height as u32,
-      })
+      if has_alpha {
+        for i in 0..len {
+          bgra_data[i*4+3] = MaybeUninit::new(pixels[i*4+3]);
+        }
+      }
+
+      return bgra_data;
+    }
+
+    for dst_i in 0..len {
+      let dst_x = dst_i % width;
+      let dst_y = dst_i / width;
+
+      let src_x = dst_x * src_width  / width;
+      let src_y = dst_y * src_height / height;
+
+      let src_i = src_y * src_width + src_x;
+      bgra_data[dst_i*bps+0] = MaybeUninit::new(pixels[src_i*bps+2]);
+      bgra_data[dst_i*bps+1] = MaybeUninit::new(pixels[src_i*bps+1]);
+      bgra_data[dst_i*bps+2] = MaybeUninit::new(pixels[src_i*bps+0]);
     }
-    colorspace => {
-      unreachable!("unexpected colorspace when parsing PNG: {colorspace:?}")
+
+    if has_alpha {
+      for dst_i in 0..len {
+        let dst_x = dst_i % width;
+        let dst_y = dst_i / width;
+
+        let src_x = dst_x * src_width  / width;
+        let src_y = dst_y * src_height / height;
+
+        let src_i = src_y * src_width + src_x;
+        bgra_data[dst_i*4+3] = MaybeUninit::new(pixels[src_i*4+3]);
+      }
     }
+
+    bgra_data
   }
+  
+  let bgra_data = parse_bgra_channels(
+    width,     height,
+    src_width, src_height,
+    &pixels,
+    target_height,
+    bps, has_alpha,
+  );
+  let bgra_data = unsafe {
+    mem::transmute::<Vec<MaybeUninit<u8>>, Vec<u8>>(bgra_data)
+  };
+
+  Ok(Image { width, height, pixels: PixelBuffer::BGRA(bgra_data), })
 }
 
 pub fn encode_webp(img: &Image, output_path: &Path) -> Result<(), ()> {
-  use std::mem::MaybeUninit;
   use libwebp_sys::*;
   const WEBP_IMAGE_QUALITY: f32 = 90.0;
 
@@ -230,82 +412,50 @@ pub fn encode_webp(img: &Image, output_path: &Path) -> Result<(), ()> {
     WebPMemoryWriterInit(ww.as_mut_ptr());
     let mut ww: WebPMemoryWriter = ww.assume_init();
 
-    // NOTE: we cannot re-use picture because WebPPictureAlloc,
-    //       WebPPictureImportRGB & WebPPictureImportRGBA all free the pixel
-    //       buffers before reallocating them
+    // NOTE: it makes no sense to re-use picture since there is no allocated
+    //       data other than its fields
     let mut picture = MaybeUninit::uninit();
     let init = WebPPictureInit(picture.as_mut_ptr());
     let mut picture: WebPPicture = picture.assume_init();
     debug_assert!(init, "libwebp version mismatch");
 
-    // NOTE: the libwebp documentation recommends using YUV (which is the
-    //       default in WebPPictureInit) when doing lossy compression
-    // picture.use_argb = 0;
+
     picture.width  = img.width  as i32;
     picture.height = img.height as i32;
-    let import_status = match &img.pixels {
-      PixelBuffer::Yuv { y, u, v, uv_stride, } => {
-        // NOTE: setting picture.y, picture.u & picture.v manually to avoid
-        //       recopying the pixel buffers
-        //
-        //       we must indicate picture is a "view" (i.e. does not own the
-        //       pixel buffer memory) by setting picture.memory_ to NULL:
-        //       libwebp/src/enc/picture_rescale_enc.c:97
-        picture.memory_   = ptr::null_mut();
+
+    // NOTE: setting the planes manually for efficiency
+    match &img.pixels {
+      PixelBuffer::YCbCr { y, cb, cr, a, uv_stride, } => {
+        // picture.colorspace = WebPEncCSP::WEBP_YUV420; // already default
+        // picture.use_argb = 0;                         // already default
         picture.y_stride  = img.width  as i32;
         picture.uv_stride = *uv_stride as i32;
-        picture.y = y.as_ptr() as *mut _;
-        picture.u = u.as_ptr() as *mut _;
-        picture.v = v.as_ptr() as *mut _;
-
-        debug_assert!(WebPPictureIsView(&picture) != 0,
-                      "picture should have been configured as \"view\"");
-        true
-      }
-      PixelBuffer::Grayscale { level, alpha, } => {
-        if alpha.is_some() { picture.colorspace = WebPEncCSP::WEBP_YUV420A; }
-        let status = WebPPictureAlloc(&mut picture) != 0;
-        if status {
-          let uv_width:  usize = img.width.div_ceil(2)  as _;
-          let uv_height: usize = img.height.div_ceil(2) as _;
-          let uv_len = uv_width * uv_height;
-
-          ptr::copy_nonoverlapping(level.as_ptr(), picture.y, level.len());
-          ptr::write_bytes(picture.u, 128, uv_len);
-          ptr::write_bytes(picture.v, 128, uv_len);
-          if let Some(alpha) = alpha {
-            ptr::copy_nonoverlapping(alpha.as_ptr(), picture.a, alpha.len());
-          }
+        picture.y =  y.as_ptr() as *mut _;
+        picture.u = cb.as_ptr() as *mut _;
+        picture.v = cr.as_ptr() as *mut _;
+
+        if let Some(a) = a {
+          picture.colorspace = WebPEncCSP::WEBP_YUV420A;
+          picture.a_stride   = img.width as i32;
+          picture.a          = a.as_ptr() as *mut _;
         }
-
-        status
-      }
-      PixelBuffer::Rgb(pixels) => {
-        WebPPictureImportRGB(
-          &mut picture,
-          pixels.as_ptr(),
-          img.width as i32 * 3,
-        ) != 0
       }
-      PixelBuffer::Rgba(pixels) => {
-        WebPPictureImportRGBA(
-          &mut picture,
-          pixels.as_ptr(),
-          img.width as i32 * 4,
-        ) != 0
+      PixelBuffer::BGRA(bgra_data) => {
+        picture.use_argb    = 1;
+        picture.argb_stride = img.width as i32;
+        picture.argb        = bgra_data.as_ptr() as *mut _;
       }
-    };
-    if !import_status {
-      log_webp_memmory_error(picture.error_code, output_path);
-      return Err(());
     }
-    picture.writer = Some(WebPMemoryWrite);
+
+    debug_assert!(WebPPictureIsView(&picture) != 0,
+                  "picture should have been configured as \"view\" by not calling WebPPictureAlloc or any of the \"Import\" routines");
+
+    picture.writer     = Some(WebPMemoryWrite);
     picture.custom_ptr = &mut ww as *mut WebPMemoryWriter as _;
 
     // ======================================================================
     let status = WebPEncode(&config, &mut picture) != 0;
-
-    let bytes = if status {
+    let bytes  = if status {
       slice::from_raw_parts(ww.mem, ww.size)
     } else {
       log_webp_memmory_error(picture.error_code, output_path);
@@ -320,6 +470,7 @@ pub fn encode_webp(img: &Image, output_path: &Path) -> Result<(), ()> {
     }
 
     WebPPictureFree(&mut picture);
+    WebPMemoryWriterClear(&mut ww);
   }
 
   Ok(())
diff --git a/src/main.rs b/src/main.rs
@@ -434,17 +434,7 @@ fn write_license(f: &mut File) -> io::Result<()> {
 }
 
 fn render_thumbnail(pic: &GalleryEntry, thumb_path: &Path) -> Result<(), ()> {
-  const TARGET_HEIGHT: u32 = 500;
-
-  #[inline]
-  fn warn_img_size(src_width: u32, src_height: u32, src_path: &Path) {
-    let target_width = src_width * TARGET_HEIGHT / src_height;
-    let scale_x      = src_width / target_width;
-
-    if scale_x > 1 {
-      warnln!("Thumbnail for {src_path:?}? {src_width}×{src_height}: thumbnails are not downsampled");
-    }
-  }
+  const TARGET_HEIGHT: usize = 500;
 
   match pic.file_format {
     FileFormat::TeX => {
@@ -490,17 +480,15 @@ fn render_thumbnail(pic: &GalleryEntry, thumb_path: &Path) -> Result<(), ()> {
       copy(&src_path, thumb_path)?;
     }
     FileFormat::Jpeg => {
-      // NOTE: we do not rescale the images since the scale-factor for the
-      //       actual images included in our gallery should always be 1
-      let img = image::parse_jpeg(&pic.path)?;
-      warn_img_size(img.width, img.height, &pic.path);
+      // NOTE: even if the picture is no taller than TARGET_HEIGHT * 2, it is
+      //       faster to downsample and then encode
+      let img = image::parse_jpeg(&pic.path, TARGET_HEIGHT)?;
       image::encode_webp(&img, thumb_path)?;
     }
     FileFormat::Png => {
-      // NOTE: we do not rescale the images since the scale-factor for the
-      //       actual images included in our gallery should always be 1
-      let img = image::parse_png(&pic.path)?;
-      warn_img_size(img.width, img.height, &pic.path);
+      // NOTE: even if the picture is no taller than TARGET_HEIGHT * 2, it is
+      //       faster to downsample and then encode
+      let img = image::parse_png(&pic.path, TARGET_HEIGHT)?;
       image::encode_webp(&img, thumb_path)?;
     }
   }