2019-02-06 18:33:01 +01:00
|
|
|
pub use CommentStyle::*;
|
|
|
|
|
|
|
|
use crate::ast;
|
2020-01-01 19:25:28 +01:00
|
|
|
use rustc_span::source_map::SourceMap;
|
2019-12-31 18:15:40 +01:00
|
|
|
use rustc_span::{BytePos, CharPos, FileName, Pos};
|
2012-12-23 23:41:37 +01:00
|
|
|
|
2019-10-11 14:39:52 +02:00
|
|
|
use log::debug;
|
2020-01-11 17:02:46 +01:00
|
|
|
use std::usize;
|
2019-10-11 14:39:52 +02:00
|
|
|
|
2019-08-01 23:26:40 +02:00
|
|
|
#[cfg(test)]
|
|
|
|
mod tests;
|
|
|
|
|
2016-10-12 19:54:41 +02:00
|
|
|
/// Describes how a comment is positioned relative to the code around it.
#[derive(Clone, Copy, PartialEq, Debug)]
pub enum CommentStyle {
    /// No code on either side of each line of the comment
    Isolated,
    /// Code exists to the left of the comment
    Trailing,
    /// Code before /* foo */ and after the comment
    Mixed,
    /// Just a manual blank line "\n\n", for layout
    BlankLine,
}
|
|
|
|
|
2015-01-04 04:54:18 +01:00
|
|
|
/// A single comment (or blank-line marker) harvested from source text.
#[derive(Clone)]
pub struct Comment {
    /// Layout of the comment relative to the surrounding code.
    pub style: CommentStyle,
    /// The comment's text, one `String` per source line.
    /// Empty for `BlankLine` comments.
    pub lines: Vec<String>,
    /// Byte position of the start of the comment in the source file.
    pub pos: BytePos,
}
|
2012-04-15 12:27:24 +02:00
|
|
|
|
2019-10-15 22:48:13 +02:00
|
|
|
pub fn is_line_doc_comment(s: &str) -> bool {
|
2019-12-22 23:42:04 +01:00
|
|
|
let res = (s.starts_with("///") && *s.as_bytes().get(3).unwrap_or(&b' ') != b'/')
|
|
|
|
|| s.starts_with("//!");
|
2019-10-11 14:39:52 +02:00
|
|
|
debug!("is {:?} a doc comment? {}", s, res);
|
|
|
|
res
|
|
|
|
}
|
|
|
|
|
2019-10-15 22:48:13 +02:00
|
|
|
pub fn is_block_doc_comment(s: &str) -> bool {
|
2019-10-11 14:39:52 +02:00
|
|
|
// Prevent `/**/` from being parsed as a doc comment
|
2019-12-22 23:42:04 +01:00
|
|
|
let res = ((s.starts_with("/**") && *s.as_bytes().get(3).unwrap_or(&b' ') != b'*')
|
|
|
|
|| s.starts_with("/*!"))
|
|
|
|
&& s.len() >= 5;
|
2019-10-11 14:39:52 +02:00
|
|
|
debug!("is {:?} a doc comment? {}", s, res);
|
|
|
|
res
|
|
|
|
}
|
|
|
|
|
2019-10-10 10:26:10 +02:00
|
|
|
// FIXME(#64197): Try to privatize this again.
|
|
|
|
pub fn is_doc_comment(s: &str) -> bool {
|
2019-12-22 23:42:04 +01:00
|
|
|
(s.starts_with("///") && is_line_doc_comment(s))
|
|
|
|
|| s.starts_with("//!")
|
|
|
|
|| (s.starts_with("/**") && is_block_doc_comment(s))
|
|
|
|
|| s.starts_with("/*!")
|
2012-06-30 12:54:54 +02:00
|
|
|
}
|
|
|
|
|
2013-07-19 13:51:37 +02:00
|
|
|
pub fn doc_comment_style(comment: &str) -> ast::AttrStyle {
|
2013-03-29 02:39:09 +01:00
|
|
|
assert!(is_doc_comment(comment));
|
2013-05-19 07:07:44 +02:00
|
|
|
if comment.starts_with("//!") || comment.starts_with("/*!") {
|
2015-10-01 18:03:34 +02:00
|
|
|
ast::AttrStyle::Inner
|
2012-06-30 12:54:54 +02:00
|
|
|
} else {
|
2015-10-01 18:03:34 +02:00
|
|
|
ast::AttrStyle::Outer
|
2012-06-30 12:54:54 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-05-23 01:57:53 +02:00
|
|
|
/// Strips the comment markers (`///!`, `///`, `//!`, `//`, or a `/*…*/`
/// delimiter pair) from a doc comment and returns the remaining text.
///
/// For block comments this also drops leading/trailing star-only or
/// whitespace-only lines and a `[ \t]*\*` prefix shared by every line.
///
/// Panics with "not a doc-comment" if `comment` has none of the recognized
/// prefixes.
pub fn strip_doc_comment_decoration(comment: &str) -> String {
    /// remove whitespace-only lines from the start/end of lines
    fn vertical_trim(lines: Vec<String>) -> Vec<String> {
        // `i..j` is the retained window of lines.
        let mut i = 0;
        let mut j = lines.len();
        // first line of all-stars should be omitted
        if !lines.is_empty() && lines[0].chars().all(|c| c == '*') {
            i += 1;
        }

        // Skip leading whitespace-only lines.
        while i < j && lines[i].trim().is_empty() {
            i += 1;
        }

        // like the first, a last line of all stars should be omitted
        // NOTE(review): `skip(1)` tolerates one arbitrary leading char on
        // the last line before the stars — confirm this is intentional.
        if j > i && lines[j - 1].chars().skip(1).all(|c| c == '*') {
            j -= 1;
        }

        // Skip trailing whitespace-only lines.
        while j > i && lines[j - 1].trim().is_empty() {
            j -= 1;
        }

        lines[i..j].to_vec()
    }

    /// remove a "[ \t]*\*" block from each line, if possible
    fn horizontal_trim(lines: Vec<String>) -> Vec<String> {
        // Column of the shared `*`; sentinel `usize::MAX` until the first
        // line fixes it.
        let mut i = usize::MAX;
        let mut can_trim = true;
        let mut first = true;

        for line in &lines {
            for (j, c) in line.chars().enumerate() {
                // Only `*`, space, and tab may precede the shared column.
                if j > i || !"* \t".contains(c) {
                    can_trim = false;
                    break;
                }
                if c == '*' {
                    if first {
                        // The first line determines the column of the `*`.
                        i = j;
                        first = false;
                    } else if i != j {
                        // A `*` at a different column breaks the pattern.
                        can_trim = false;
                    }
                    break;
                }
            }
            // A line shorter than the shared column has no `*` to strip.
            if i >= line.len() {
                can_trim = false;
            }
            if !can_trim {
                break;
            }
        }

        if can_trim {
            // Drop everything up to and including the shared `*`.
            lines.iter().map(|line| (&line[i + 1..line.len()]).to_string()).collect()
        } else {
            lines
        }
    }

    // one-line comments lose their prefix
    const ONELINERS: &[&str] = &["///!", "///", "//!", "//"];

    for prefix in ONELINERS {
        if comment.starts_with(*prefix) {
            return (&comment[prefix.len()..]).to_string();
        }
    }

    if comment.starts_with("/*") {
        // Drop the 3-char opening delimiter (e.g. `/**`, `/*!`) and the
        // 2-char closing `*/`, then trim the remaining lines.
        let lines =
            comment[3..comment.len() - 2].lines().map(|s| s.to_string()).collect::<Vec<String>>();

        let lines = vertical_trim(lines);
        let lines = horizontal_trim(lines);

        return lines.join("\n");
    }

    panic!("not a doc-comment: {}", comment);
}
|
|
|
|
|
2019-02-08 14:53:55 +01:00
|
|
|
/// Returns `None` if the first `col` chars of `s` contain a non-whitespace char.
|
|
|
|
/// Otherwise returns `Some(k)` where `k` is first char offset after that leading
|
|
|
|
/// whitespace. Note that `k` may be outside bounds of `s`.
|
2015-01-18 00:33:05 +01:00
|
|
|
fn all_whitespace(s: &str, col: CharPos) -> Option<usize> {
|
2018-10-27 14:41:26 +02:00
|
|
|
let mut idx = 0;
|
|
|
|
for (i, ch) in s.char_indices().take(col.to_usize()) {
|
2015-03-11 00:29:02 +01:00
|
|
|
if !ch.is_whitespace() {
|
2013-05-27 12:08:37 +02:00
|
|
|
return None;
|
|
|
|
}
|
2018-12-05 04:15:28 +01:00
|
|
|
idx = i + ch.len_utf8();
|
2012-08-02 02:30:05 +02:00
|
|
|
}
|
2018-10-27 14:41:26 +02:00
|
|
|
Some(idx)
|
2012-04-15 12:27:24 +02:00
|
|
|
}
|
|
|
|
|
2019-05-06 10:53:40 +02:00
|
|
|
fn trim_whitespace_prefix(s: &str, col: CharPos) -> &str {
|
2013-05-09 22:27:24 +02:00
|
|
|
let len = s.len();
|
2019-05-06 10:53:40 +02:00
|
|
|
match all_whitespace(&s, col) {
|
2019-12-22 23:42:04 +01:00
|
|
|
Some(col) => {
|
|
|
|
if col < len {
|
|
|
|
&s[col..]
|
|
|
|
} else {
|
|
|
|
""
|
|
|
|
}
|
|
|
|
}
|
2013-05-27 12:08:37 +02:00
|
|
|
None => s,
|
2012-04-15 12:27:24 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-12-22 23:42:04 +01:00
|
|
|
fn split_block_comment_into_lines(text: &str, col: CharPos) -> Vec<String> {
|
2019-05-06 10:53:40 +02:00
|
|
|
let mut res: Vec<String> = vec![];
|
|
|
|
let mut lines = text.lines();
|
|
|
|
// just push the first line
|
|
|
|
res.extend(lines.next().map(|it| it.to_string()));
|
|
|
|
// for other lines, strip common whitespace prefix
|
|
|
|
for line in lines {
|
|
|
|
res.push(trim_whitespace_prefix(line, col).to_string())
|
2016-01-03 10:14:09 +01:00
|
|
|
}
|
2019-05-06 10:53:40 +02:00
|
|
|
res
|
2012-04-15 12:27:24 +02:00
|
|
|
}
|
|
|
|
|
2013-04-03 01:44:01 +02:00
|
|
|
// it appears this function is called only from pprust... that's
// probably not a good thing.
/// Re-lexes `src` and collects every comment it contains, classified by
/// `CommentStyle`. Blank lines are recorded as `BlankLine` "comments";
/// doc comments are excluded (filtered via `is_doc_comment` /
/// `is_block_doc_comment`).
pub fn gather_comments(sm: &SourceMap, path: FileName, src: String) -> Vec<Comment> {
    // Build a private SourceMap (sharing `sm`'s path mapping) so `src` can
    // be registered as a source file and queried for line positions.
    let cm = SourceMap::new(sm.path_mapping().clone());
    let source_file = cm.new_source_file(path, src);
    let text = (*source_file.src.as_ref().unwrap()).clone();

    let text: &str = text.as_str();
    let start_bpos = source_file.start_pos;
    // `pos` is the byte offset of the next token within `text`.
    let mut pos = 0;
    let mut comments: Vec<Comment> = Vec::new();
    let mut code_to_the_left = false;

    // A leading shebang line is reported as an isolated comment.
    if let Some(shebang_len) = rustc_lexer::strip_shebang(text) {
        comments.push(Comment {
            style: Isolated,
            lines: vec![text[..shebang_len].to_string()],
            pos: start_bpos,
        });
        pos += shebang_len;
    }

    for token in rustc_lexer::tokenize(&text[pos..]) {
        let token_text = &text[pos..pos + token.len];
        match token.kind {
            rustc_lexer::TokenKind::Whitespace => {
                // Every newline after the first one in a whitespace run
                // marks a blank line; record each at its newline's position.
                if let Some(mut idx) = token_text.find('\n') {
                    code_to_the_left = false;
                    while let Some(next_newline) = &token_text[idx + 1..].find('\n') {
                        idx = idx + 1 + next_newline;
                        comments.push(Comment {
                            style: BlankLine,
                            lines: vec![],
                            pos: start_bpos + BytePos((pos + idx) as u32),
                        });
                    }
                }
            }
            rustc_lexer::TokenKind::BlockComment { terminated: _ } => {
                if !is_block_doc_comment(token_text) {
                    // Anything but a (CR)LF right after the comment counts
                    // as code to the right (including EOF's `None`).
                    let code_to_the_right = match text[pos + token.len..].chars().next() {
                        Some('\r') | Some('\n') => false,
                        _ => true,
                    };
                    let style = match (code_to_the_left, code_to_the_right) {
                        (true, true) | (false, true) => Mixed,
                        (false, false) => Isolated,
                        (true, false) => Trailing,
                    };

                    // Count the number of chars since the start of the line by rescanning.
                    let pos_in_file = start_bpos + BytePos(pos as u32);
                    let line_begin_in_file = source_file.line_begin_pos(pos_in_file);
                    let line_begin_pos = (line_begin_in_file - start_bpos).to_usize();
                    let col = CharPos(text[line_begin_pos..pos].chars().count());

                    let lines = split_block_comment_into_lines(token_text, col);
                    comments.push(Comment { style, lines, pos: pos_in_file })
                }
            }
            rustc_lexer::TokenKind::LineComment => {
                if !is_doc_comment(token_text) {
                    comments.push(Comment {
                        style: if code_to_the_left { Trailing } else { Isolated },
                        lines: vec![token_text.to_string()],
                        pos: start_bpos + BytePos(pos as u32),
                    })
                }
            }
            _ => {
                // Any real token means subsequent comments on this line
                // trail code.
                code_to_the_left = true;
            }
        }
        pos += token.len;
    }

    comments
}
|