FEATURE: allow search engines to index tag pages. (#12248)

Previously, we blocked search engines from tag pages since they could get marked as duplicate content.

* DEV: block tag inner pages from search engine crawling.
parent 7435d55ea6
commit e3d8e828b8
@@ -23,7 +23,7 @@ class RobotsTxtController < ApplicationController
   /u
   /my
   /search
-  /tag
+  /tag/*/l
   /g
   /t/*/*.rss
   /c/*.rss
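The only change to the crawling rules is that the blanket /tag entry becomes a wildcard aimed at tag inner listing pages. As a rough sketch, assuming the stock template that emits one Disallow line per path under a User-agent: * block, the relevant part of the generated robots.txt would now read roughly:

    User-agent: *
    Disallow: /u
    Disallow: /my
    Disallow: /search
    Disallow: /tag/*/l
    Disallow: /g
    Disallow: /t/*/*.rss
    Disallow: /c/*.rss

So a crawler may now fetch a tag landing page (e.g. /tag/ruby, an illustrative tag name), while its filtered inner listings (e.g. /tag/ruby/l/top) remain blocked, which is what guards against the duplicate-content problem the old blanket rule existed for.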
@@ -23,7 +23,7 @@ class TagsController < ::ApplicationController

   before_action :fetch_tag, only: [:info, :create_synonyms, :destroy_synonym]

-  after_action :add_noindex_header
+  after_action :add_noindex_header, except: [:index, :show]

   def index
     @description_meta = I18n.t("tags.title")
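The after_action now skips the index and show actions, so tag landing pages no longer receive the noindex response header. The helper itself is not part of this diff; as a minimal sketch, assuming it simply sets the X-Robots-Tag header (the real implementation may also consult site settings), a callback along these lines is what the except: option bypasses:

    class ApplicationController < ActionController::Base
      private

      # Sketch only: mark the response as not indexable. Controllers opt
      # actions out of this via `except:` on the after_action callback.
      def add_noindex_header
        response.headers['X-Robots-Tag'] = 'noindex'
      end
    end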
@@ -29,7 +29,6 @@ describe TagsController do
       tags = response.parsed_body["tags"]
       expect(tags.length).to eq(1)
       expect(tags[0]['text']).to eq("topic-test")
-      expect(response.headers['X-Robots-Tag']).to eq('noindex')
     end
   end

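The removed expectation asserted the old always-noindex behaviour. A hypothetical complementary assertion (not part of this commit, and assuming the same request-spec setup and the /tags.json route) that would pin down the new behaviour might look like:

    it "does not mark the tags index as noindex" do
      get "/tags.json"
      expect(response.status).to eq(200)
      # Index and show are now excepted from add_noindex_header,
      # so no X-Robots-Tag header should be present.
      expect(response.headers['X-Robots-Tag']).to be_nil
    end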