/**
 * robots.ts — Robots.txt generation for Novarix Networks
 *
 * Next.js App Router generates /robots.txt automatically from the object
 * returned by this default export.
 *
 * ─── Notes ───────────────────────────────────────────────────────────────
 *
 * • The `sitemap` field should match the canonical domain. If the domain
 *   ever changes, update both this file and sitemap.ts.
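 *
 *   A minimal sketch of one way to keep the two in sync, assuming a
 *   hypothetical shared module (src/lib/site.ts is an invented path,
 *   not part of this repo):
 *
 *     // src/lib/site.ts (hypothetical)
 *     export const SITE_URL = "https://novarixnet.com";
 *
 *   Both this file and sitemap.ts could then import SITE_URL and build
 *   `${SITE_URL}/sitemap.xml` from it.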
 *
 * • To block specific paths (e.g. staging pages or admin routes), add
 *   `disallow` entries to the rules array:
 *
 *     rules: [
 *       { userAgent: "*", allow: "/", disallow: ["/admin/", "/staging/"] },
 *     ],
 *
 * • To block a specific crawler entirely:
 *
 *     rules: [
 *       { userAgent: "*", allow: "/" },
 *       { userAgent: "GPTBot", disallow: "/" },
 *     ],
 *
 * ─── Output ──────────────────────────────────────────────────────────────
 *
 * User-agent: *
 * Allow: /
 * Sitemap: https://novarixnet.com/sitemap.xml
 */

import type { MetadataRoute } from "next";

export default function robots(): MetadataRoute.Robots {
  return {
    rules: {
      userAgent: "*",
      allow: "/",
    },
    sitemap: "https://novarixnet.com/sitemap.xml",
  };
}
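
// Quick sanity check during development (assumes the default `next dev`
// port): `curl http://localhost:3000/robots.txt` should print the Output
// block shown in the header comment.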