class QingHaiSpider(scrapy.Spider):
    """Spider for the Qinghai procurement portal (111.44.251.34).

    POSTs JSON queries to the site's full-text search endpoint, one query
    per category number in ``results``, then paginates through every page
    of records and yields a ``QhItem`` per record.
    """

    name = "qinghai"

    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 '
                      '(KHTML, like Gecko) Chrome/65.0.3325.146 Safari/537.36',
        # NOTE(review): original read 'text/Javascript, /;' — the '*/*'
        # wildcard appears to have been mangled (markdown-stripped asterisks);
        # restored here. Confirm against a captured browser request.
        'Accept': 'application/json, text/Javascript, */*; q=0.01',
    }

    # categorynum prefix -> content_type code used when classifying items.
    results = {"001002": "01", "001001": "02", "001005": "03"}

    # Single search endpoint used by both the first page and pagination.
    search_url = 'http://111.44.251.34/inteligentsearch/rest/inteligentSearch/getFullTextData'

    def _build_payload(self, category, pn):
        """Return the JSON request body for one search page.

        category: categorynum prefix to filter on (a key of ``results``).
        pn: record offset (0 for the first page, then 10, 20, ...).
        """
        payload = {
            "token": "", "pn": pn, "rn": 10, "sdt": "", "edt": "", "wd": "",
            "inc_wd": "", "exc_wd": "", "fields": "title",
            "cnum": "001;002;003;004;005;006;007;008;009;010",
            # BUG FIX: the original first-page payload wrote this as
            # "sort":"{"showdate":"0"}" — unescaped inner quotes, a Python
            # syntax error. The escaped form below matches the (correct)
            # pagination payload the original already used.
            "sort": "{\"showdate\":\"0\"}",
            "ssort": "title", "cl": 200, "terminal": "",
            "condition": [{"fieldName": "categorynum", "isLike": True,
                           "likeType": 2, "equal": category}],
            "time": None, "highlights": "title", "statistics": None,
            "unionCondition": None, "accuracy": "100", "noParticiple": "0",
            "searchRange": None, "isBusiness": 1,
        }
        return json.dumps(payload)

    def start_requests(self):
        # One first-page POST per category; the category key travels in
        # meta so parse() can build the pagination requests for it.
        for key in self.results:
            yield scrapy.Request(
                url=self.search_url,
                method='POST',
                body=self._build_payload(key, 0),
                headers=self.headers,
                callback=self.parse,
                dont_filter=False,
                meta={'key': key, 'pn': 0},
            )

    def parse(self, response):
        key = response.meta['key']
        result = json.loads(response.text)["result"]
        max_counts = result["totalcount"]

        for data in result["records"]:
            item = QhItem()
            item["title"] = data["title"]
            item["date"] = data["showdate"]
            item["detail_url"] = "http://111.44.251.34" + data["linkurl"]
            item["area_code"] = "QINGHAI"
            item["publish_id"] = "181818"
            item["thing_id"] = "42"
            # Classify by the category number embedded in the detail URL.
            if "001002" in item["detail_url"]:
                item["content_type"] = "01"
            elif "001001" in item["detail_url"]:
                item["content_type"] = "02"
            else:
                item["content_type"] = "03"
            yield item

        # BUG FIX: the original pagination requests omitted meta, so every
        # follow-up page crashed with KeyError on response.meta['key']; they
        # also re-spawned the full pagination fan-out from every page (hidden
        # only by the duplicate filter). Only the first page (pn == 0)
        # schedules the remaining pages, and meta is now forwarded.
        if response.meta.get('pn', 0) == 0:
            for offset in range(10, max_counts + 10, 10):
                yield scrapy.Request(
                    url=self.search_url,
                    method='POST',
                    body=self._build_payload(key, offset),
                    headers=self.headers,
                    callback=self.parse,
                    dont_filter=False,
                    meta={'key': key, 'pn': offset},
                )