FEATURE: allow search engines to index tag pages. (#12248)

Previously, we blocked search engines on tag pages since they could get flagged as duplicate content.

* DEV: block tag inner pages from search engine crawling.
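
The practical effect shows up in the generated robots.txt. A rough before/after sketch, assuming each disallowed path is emitted as a Disallow rule under a wildcard User-agent (the tag name "support" below is just an example):

    Before:  Disallow: /tag
    After:   Disallow: /tag/*/l

Under the old rule everything beneath /tag was off-limits, so a tag's landing page such as /tag/support could never be indexed. Under the new rule only the filtered topic lists, e.g. /tag/support/l/latest, stay blocked, while the tag page itself becomes crawlable.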
Author: Vinoth Kannan, 2021-03-09 23:55:57 +05:30 (committed by GitHub)
Commit: e3d8e828b8 (parent: 7435d55ea6)
3 changed files with 2 additions and 3 deletions

app/controllers/robots_txt_controller.rb

@@ -23,7 +23,7 @@ class RobotsTxtController < ApplicationController
     /u
     /my
     /search
-    /tag
+    /tag/*/l
     /g
     /t/*/*.rss
     /c/*.rss
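
To see what the new wildcard rule covers, here is a small Ruby sketch; it is not code from this commit, the regexp is only a rough translation of the robots.txt pattern, and the URLs are example Discourse routes:

    # "Disallow: /tag/*/l" matches by URL-path prefix, with "*" standing in for
    # any sequence of characters; a rough Ruby equivalent of that rule:
    rule = %r{\A/tag/.*/l}

    puts "/tag/support".match?(rule)           # false -> tag page is crawlable again
    puts "/tag/support/l/latest".match?(rule)  # true  -> filtered tag listing stays blocked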

app/controllers/tags_controller.rb

@@ -23,7 +23,7 @@ class TagsController < ::ApplicationController
   before_action :fetch_tag, only: [:info, :create_synonyms, :destroy_synonym]

-  after_action :add_noindex_header
+  after_action :add_noindex_header, except: [:index, :show]

   def index
     @description_meta = I18n.t("tags.title")
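
For context, the except: [:index, :show] option keeps the noindex callback on every other TagsController action, so the remaining tag routes still send the header while the tag index and individual tag pages do not. A minimal sketch of the mechanism, with a simplified add_noindex_header (the real helper is defined elsewhere in Discourse and may carry extra conditions):

    class TagsController < ApplicationController
      # Runs after every action except the two that should now be indexable.
      after_action :add_noindex_header, except: [:index, :show]

      private

      # Simplified for illustration: ask crawlers not to index this response.
      def add_noindex_header
        response.headers['X-Robots-Tag'] = 'noindex'
      end
    end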

spec/requests/tags_controller_spec.rb

@@ -29,7 +29,6 @@ describe TagsController do
       tags = response.parsed_body["tags"]
       expect(tags.length).to eq(1)
       expect(tags[0]['text']).to eq("topic-test")
-      expect(response.headers['X-Robots-Tag']).to eq('noindex')
     end
   end
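
Since the spec above no longer expects a noindex header from the tags index, the opposite could be asserted instead. A hypothetical request-spec sketch, not part of this commit, assuming the same setup as the surrounding file:

    it "no longer asks crawlers to skip the tags index" do
      get "/tags.json"

      expect(response.status).to eq(200)
      expect(response.headers['X-Robots-Tag']).to be_nil
    end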