ad4451276d
* Add a `robots.txt` file to prevent crawlers from scraping the site
* Add an `ASSET_RIGHTS` entry to config.yaml to control whether `/robots.txt` is served or not (both sketched after this list)
* Always import robots.py, determine config in route function
* Finish writing a comment
* Remove a redundant import and config
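Neither the config entry nor the static file appears in this commit view. As a rough sketch (the `ASSET_RIGHTS` key name comes from the commit message; the default value and the file contents are assumptions), the config.yaml entry might look like:

# config.yaml (sketch; only the relevant key shown)
# false: unofficial host; serve /robots.txt so crawlers stay away.
# true:  official WebHost; /robots.txt returns 404 and crawlers may index.
ASSET_RIGHTS: false

and a deny-all static robots.txt would read:

# static/robots.txt (sketch, assumed contents): block all crawlers.
User-agent: *
Disallow: /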
WebHostLib/robots.py · 15 lines · 378 B · Python
from WebHostLib import app
from flask import abort
from . import cache


# app.route must be the outermost decorator so Flask registers the cached
# wrapper; in the reverse order the cache would never be consulted.
@app.route('/robots.txt')
@cache.cached()
def robots():
    # If this host is not official, do not allow search engine crawling:
    # serve the deny-all robots.txt from the static folder.
    if not app.config["ASSET_RIGHTS"]:
        return app.send_static_file('robots.txt')

    # Send a 404 if the host has affirmed this to be the official WebHost,
    # leaving crawlers free to index the site.
    abort(404)
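A quick way to exercise both branches is Flask's test client. This is a sketch, not a test from the commit: it assumes the WebHostLib app and its cache are importable in a test context, that a cache backend is configured, and that the static robots.txt file exists; robots_status is a hypothetical helper.

from WebHostLib import app, cache

def robots_status(asset_rights: bool) -> int:
    # Toggle the config flag, drop any cached response, then hit the route.
    app.config["ASSET_RIGHTS"] = asset_rights
    cache.clear()
    with app.test_client() as client:
        return client.get('/robots.txt').status_code

# Unofficial host: the deny-all robots.txt is served (200).
assert robots_status(False) == 200
# Official WebHost: 404, leaving crawlers free to index.
assert robots_status(True) == 404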