A Study on Self-Supervised Object Detection Pretraining

Cite

@inproceedings{10.1007/978-3-031-25069-9_6,
  author    = {Dang, Trung
               and Kornblith, Simon
               and Nguyen, Huy Thong
               and Chin, Peter
               and Khademi, Maryam},
  editor    = {Karlinsky, Leonid
               and Michaeli, Tomer
               and Nishino, Ko},
  title     = {A Study on Self-Supervised Object Detection Pretraining},
  booktitle = {Computer Vision -- {ECCV} 2022 Workshops},
  year      = {2023},
  publisher = {Springer Nature Switzerland},
  address   = {Cham},
  pages     = {86--99},
  abstract  = {In this work, we study different approaches to self-supervised pretraining of object detection models. We first design a general framework to learn a spatially consistent dense representation from an image, by randomly sampling and projecting boxes to each augmented view and maximizing the similarity between corresponding box features. We study existing design choices in the literature, such as box generation, feature extraction strategies, and using multiple views inspired by its success on instance-level image representation learning techniques [6, 7]. Our results suggest that the method is robust to different choices of hyperparameters, and using multiple views is not as effective as shown for instance-level image representation learning. We also design two auxiliary tasks to predict boxes in one view from their features in the other view, by (1) predicting boxes from the sampled set by using a contrastive loss, and (2) predicting box coordinates using a transformer, which potentially benefits downstream object detection tasks. We found that these tasks do not lead to better object detection performance when finetuning the pretrained model on labeled data.},
  isbn      = {978-3-031-25069-9},
}

Substitutional Neural Image Compression

Cite

@inproceedings{wang-substitutional-2021,
  author    = {Wang, Xiao and Ding, Ding and Jiang, Wei and Wang, Wei and Xu, Xiaozhong and Liu, Shan and Kulis, Brian and Chin, Peter},
  title     = {Substitutional Neural Image Compression},
  booktitle = {2022 Picture Coding Symposium ({PCS})},
  year      = {2022},
  pages     = {97--101},
  doi       = {10.1109/PCS56426.2022.10018005},
}

Block Switching: A Stochastic Approach for Deep Learning Security

Publication:

Cite

@misc{wang_block_2020,
  author        = {Wang, Xiao and Wang, Siyue and Chen, Pin-Yu and Lin, Xue and Chin, Peter},
  title         = {Block Switching: A Stochastic Approach for Deep Learning Security},
  shorttitle    = {Block Switching},
  year          = {2020},
  month         = feb,
  publisher     = {arXiv},
  eprint        = {2002.07920},
  archiveprefix = {arXiv},
  primaryclass  = {cs},
  url           = {http://arxiv.org/abs/2002.07920},
  urldate       = {2022-08-06},
  abstract      = {Recent study of adversarial attacks has revealed the vulnerability of modern deep learning models. That is, subtly crafted perturbations of the input can make a trained network with high accuracy produce arbitrary incorrect predictions, while maintain imperceptible to human vision system. In this paper, we introduce Block Switching (BS), a defense strategy against adversarial attacks based on stochasticity. BS replaces a block of model layers with multiple parallel channels, and the active channel is randomly assigned in the run time hence unpredictable to the adversary. We show empirically that BS leads to a more dispersed input gradient distribution and superior defense effectiveness compared with other stochastic defenses such as stochastic activation pruning (SAP). Compared to other defenses, BS is also characterized by the following features: (i) BS causes less test accuracy drop; (ii) BS is attack-independent and (iii) BS is compatible with other defenses and can be used jointly with others.},
  annote        = {Comment: Accepted by AdvML19: Workshop on Adversarial Learning Methods for Machine Learning and Data Mining at KDD, Anchorage, Alaska, USA, August 5th, 2019, 5 pages},
  keywords      = {Computer Science - Computer Vision and Pattern Recognition, Computer Science - Machine Learning},
}