This post is a follow-up to the previous article on converting Jianshu (简书) posts into Hexo posts. In that automated conversion, the creation time of every post was simply set to "now", but I want it to match the publish time shown on Jianshu, so I can trace roughly when each article was published. The idea is to crawl the information with a scraper and store the result as a Map of (article title, publish date).
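The scraper below only builds that Map; writing the dates back into the Hexo posts still happens in the conversion script from the previous article. As a rough illustration of what that patching step could look like, here is a minimal sketch (the HexoDatePatcher class name, the source/_posts layout, and naming each post "&lt;title&gt;.md" are my assumptions, not part of the previous article's code):

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class HexoDatePatcher {

    // Replace the "date:" line in a post's front matter with the crawled publish time.
    static void patchDate(Path post, String date) throws IOException {
        List<String> lines = Files.readAllLines(post, StandardCharsets.UTF_8);
        List<String> patched = lines.stream()
                .map(line -> line.startsWith("date:") ? "date: " + date : line)
                .collect(Collectors.toList());
        Files.write(post, patched, StandardCharsets.UTF_8);
    }

    // Assumption: each post is stored as source/_posts/<title>.md, named after the Jianshu title.
    static void patchAll(Path postsDir, Map<String, String> titleToDate) throws IOException {
        for (Map.Entry<String, String> entry : titleToDate.entrySet()) {
            Path post = postsDir.resolve(entry.getKey() + ".md");
            if (Files.exists(post)) {
                patchDate(post, entry.getValue());
            }
        }
    }

    public static void main(String[] args) throws IOException {
        // Placeholder entry; the real data comes from the scraper below.
        Map<String, String> titleToDate = new HashMap<>();
        titleToDate.put("sample-post", "2019-09-30 09:29:00");
        patchAll(Paths.get("source/_posts"), titleToDate);
    }
}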
Environment

jdk 1.8

- Add the required jar dependencies:
<!-- https://mvnrepository.com/artifact/org.jsoup/jsoup -->
<dependency>
    <groupId>org.jsoup</groupId>
    <artifactId>jsoup</artifactId>
    <version>1.10.2</version>
    <optional>true</optional>
</dependency>
<!-- https://mvnrepository.com/artifact/net.sourceforge.htmlunit/htmlunit -->
<dependency>
    <groupId>net.sourceforge.htmlunit</groupId>
    <artifactId>htmlunit</artifactId>
    <version>2.32</version>
    <optional>true</optional>
</dependency>
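The code below also uses org.apache.commons.lang3.StringUtils, so if commons-lang3 is not already on the classpath, add it as well (the version here is only an example):

<!-- https://mvnrepository.com/artifact/org.apache.commons/commons-lang3 -->
<dependency>
    <groupId>org.apache.commons</groupId>
    <artifactId>commons-lang3</artifactId>
    <version>3.9</version>
</dependency>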
Code
import com.gargoylesoftware.htmlunit.WebClient;
import com.gargoylesoftware.htmlunit.html.HtmlPage;
import org.apache.commons.lang3.StringUtils;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import org.jsoup.nodes.Document;
/**
* @author colin.cheng
* @version V1.0
* @date Created In 9:29 2019/9/30
*/
public class jsTest {

    static Map<String, String> countMap = new HashMap<>();

    public static void main(String[] args) {
        Map<String, String> urlMap = new HashMap<>(255);
        String baseUrl = "https://www.jianshu.com/u/2eb26c0a6d3b?order_by=shared_at&page=";
        getTime(baseUrl, urlMap);
        // crawl result: Map(title, time)
        urlMap.forEach((k, v) -> {
            try {
                System.out.println(k + " = " + v);
            } catch (Exception e) {
                e.printStackTrace();
            }
        });
    }
    private static void getTime(String baseUrl, Map<String, String> urlMap) {
        try {
            String currentUrl = "";
            int pageIndex = 0;
            // keep requesting the next list page until it yields no more new articles
            do {
                currentUrl = baseUrl + pageIndex;
                pageIndex++;
                System.out.println("currentUrl = " + currentUrl);
            } while (getJianShuArticleUrlList(currentUrl, urlMap));
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
    public static boolean getJianShuArticleUrlList(String oneUrl, Map<String, String> urlMap) {
        boolean res = true;
        // simulate a browser
        WebClient webClient = new WebClient();
        // do not throw on JavaScript errors
        webClient.getOptions().setThrowExceptionOnScriptError(false);
        // disable CSS processing
        webClient.getOptions().setCssEnabled(false);
        // do not throw when a resource (e.g. a missing JS file) returns a failing status code
        webClient.getOptions().setThrowExceptionOnFailingStatusCode(false);
        try {
            // fetch the article list page
            HtmlPage page = webClient.getPage(oneUrl);
            if (page == null || !page.isHtmlPage()) {
                return false;
            }
            Document doc = Jsoup.parse(page.asXml());
            Elements articles = doc.select("[class=note-list] li");
            for (Element element : articles) {
                Element titleElement = element.select("[class=title]").first();
                Element timeElement = element.select("[class=time]").first();
                // while crawling I found that some entries use different markup;
                // if the first selector matches nothing, fall back to the other one
                if (timeElement == null) {
                    timeElement = element.select("[data-type=share_note]").first();
                }
                if (timeElement != null && titleElement != null) {
                    // the time attribute name also differs between the two layouts
                    String tag = "data-shared-at";
                    if (StringUtils.isBlank(timeElement.attr(tag))) {
                        tag = "data-datetime";
                    }
                    if (StringUtils.isNotBlank(titleElement.text()) && StringUtils.isNotBlank(timeElement.attr(tag))) {
                        // adjacent pages can overlap and return the same articles again;
                        // count the repetitions and stop crawling after the third hit
                        if (StringUtils.isNotBlank(urlMap.get(titleElement.text()))) {
                            int count = countMap.get(titleElement.text()) == null ? 1
                                    : Integer.parseInt(countMap.get(titleElement.text()));
                            count++;
                            countMap.put(titleElement.text(), count + "");
                            if (count == 3) {
                                return false;
                            }
                        }
                        // normalize the timestamp, e.g. 2019-09-30T09:29:00+08:00 -> 2019-09-30 09:29:00
                        String dateStr = timeElement.attr(tag);
                        dateStr = dateStr.replace("T", " ");
                        dateStr = dateStr.replace("+08:00", "");
                        urlMap.put(titleElement.text(), dateStr);
                    }
                } else {
                    return false;
                }
            }
        } catch (Exception e) {
            e.printStackTrace();
            res = false;
        } finally {
            webClient.close();
        }
        return res;
    }
}
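The two replace() calls above are enough for the +08:00 timestamps the page currently returns. If you prefer to parse the attribute properly instead of doing string surgery, a small java.time based variant could look like this (a sketch, assuming the attribute is a standard ISO-8601 offset date-time):

import java.time.OffsetDateTime;
import java.time.format.DateTimeFormatter;

public class DateNormalizer {

    private static final DateTimeFormatter HEXO_FORMAT =
            DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss");

    // e.g. "2019-09-30T09:29:00+08:00" -> "2019-09-30 09:29:00"
    static String normalize(String sharedAt) {
        return OffsetDateTime.parse(sharedAt).toLocalDateTime().format(HEXO_FORMAT);
    }
}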