App Store Compliance (Guideline 1.2)
Apple's App Store Review Guideline 1.2 requires every iOS app with user-generated content to implement specific safety features. Vettly provides infrastructure to meet all four requirements with a single integration.
What Apple Requires
Guideline 1.2 states that apps with user-generated content must include:
- A method to filter objectionable material from being posted
- A mechanism for users to report offensive content
- The ability to block abusive users from the service
- Published contact information so users can reach you
Apps that fail to meet these requirements are rejected during App Review.
Requirement Mapping
| Apple Requirement | Vettly Feature | API |
|---|---|---|
| Filter objectionable material | Content moderation checks | POST /v1/check |
| Report offensive content | Appeals / reporting workflow | POST /v1/appeals |
| Block abusive users | Blocklist API | Blocklist management |
| Published contact info | Your responsibility | Guidance below |
1. Filter Objectionable Material
Use vettly.check() to screen text and images before they appear in your app.
Swift (URLSession)
/// Screens `text` against the pre-built `app-store` moderation policy.
/// - Parameter text: The user-generated text to check before posting.
/// - Returns: `true` when the content may be posted (the action is not `"block"`).
/// - Throws: Networking or decoding errors, or `URLError(.badServerResponse)`
///   when the API answers with a non-2xx status.
func moderateContent(_ text: String) async throws -> Bool {
    let url = URL(string: "https://api.vettly.dev/v1/check")!
    var request = URLRequest(url: url)
    request.httpMethod = "POST"
    request.setValue("Bearer \(apiKey)", forHTTPHeaderField: "Authorization")
    request.setValue("application/json", forHTTPHeaderField: "Content-Type")
    let body: [String: Any] = [
        "content": text,
        "contentType": "text",
        "policyId": "app-store"
    ]
    request.httpBody = try JSONSerialization.data(withJSONObject: body)
    let (data, response) = try await URLSession.shared.data(for: request)
    // Fail fast on error statuses instead of trying to decode an error payload
    // as a ModerationResult, which would surface as a confusing decode error.
    guard let httpResponse = response as? HTTPURLResponse,
          (200...299).contains(httpResponse.statusCode) else {
        throw URLError(.badServerResponse)
    }
    let result = try JSONDecoder().decode(ModerationResult.self, from: data)
    return result.action != "block"
}
React Native / Expo
import { ModerationClient } from '@vettly/sdk'

// One shared client for the whole app; the key is injected via Expo's
// public environment configuration.
const vettly = new ModerationClient({
  apiKey: process.env.EXPO_PUBLIC_VETTLY_API_KEY!
})

/**
 * Returns true when `content` passes the `app-store` policy,
 * i.e. the moderation action is anything other than 'block'.
 */
async function isContentAllowed(content: string): Promise<boolean> {
  const outcome = await vettly.check({
    content,
    contentType: 'text',
    policyId: 'app-store'
  })
  return outcome.action !== 'block'
}
2. Report Offensive Content
Use the appeals API to let users report content they find objectionable.
Swift
/// Files a user report (appeal) against a piece of content.
/// - Parameters:
///   - contentId: Identifier of the content being reported.
///   - reason: Free-text reason supplied by the reporting user.
///   - reporterId: Identifier of the user filing the report.
/// - Throws: `ReportError.failed` unless the API answers 201 Created.
func reportContent(contentId: String, reason: String, reporterId: String) async throws {
    var request = URLRequest(url: URL(string: "https://api.vettly.dev/v1/appeals")!)
    request.httpMethod = "POST"
    request.setValue("Bearer \(apiKey)", forHTTPHeaderField: "Authorization")
    request.setValue("application/json", forHTTPHeaderField: "Content-Type")
    let payload: [String: Any] = [
        "contentId": contentId,
        "reason": reason,
        "reporterId": reporterId
    ]
    request.httpBody = try JSONSerialization.data(withJSONObject: payload)
    let (_, response) = try await URLSession.shared.data(for: request)
    let status = (response as? HTTPURLResponse)?.statusCode
    guard status == 201 else { throw ReportError.failed }
}
React Native / Expo
/**
 * Files an appeal for `contentId` on behalf of the signed-in user
 * and confirms submission with an alert.
 */
async function reportContent(contentId: string, reason: string) {
  const payload = {
    contentId,
    reason,
    reporterId: currentUser.id
  }
  const appeal = await vettly.createAppeal(payload)
  Alert.alert('Report Submitted', 'Thank you. We will review this content.')
  return appeal
}
Report Button Component (React Native)
import React, { useState } from 'react'
import { TouchableOpacity, Text, Alert, TextInput, Modal, View } from 'react-native'
export function ReportButton({ contentId, userId }) {
const [showModal, setShowModal] = useState(false)
const [reason, setReason] = useState('')
const submitReport = async () => {
try {
await vettly.createAppeal({
contentId,
reason,
reporterId: userId
})
setShowModal(false)
setReason('')
Alert.alert('Reported', 'Thank you for reporting this content.')
} catch (err) {
Alert.alert('Error', 'Failed to submit report. Please try again.')
}
}
return (
<>
<TouchableOpacity onPress={() => setShowModal(true)}>
<Text style={{ color: '#ef4444' }}>Report</Text>
</TouchableOpacity>
<Modal visible={showModal} transparent>
<View style={{ flex: 1, justifyContent: 'center', padding: 24, backgroundColor: 'rgba(0,0,0,0.5)' }}>
<View style={{ backgroundColor: '#fff', borderRadius: 12, padding: 20 }}>
<Text style={{ fontSize: 18, fontWeight: '600', marginBottom: 12 }}>Report Content</Text>
<TextInput
placeholder="Why are you reporting this?"
value={reason}
onChangeText={setReason}
multiline
style={{ borderWidth: 1, borderColor: '#ddd', borderRadius: 8, padding: 12, minHeight: 80, marginBottom: 12 }}
/>
<TouchableOpacity onPress={submitReport} style={{ backgroundColor: '#ef4444', borderRadius: 8, padding: 14, alignItems: 'center' }}>
<Text style={{ color: '#fff', fontWeight: '600' }}>Submit Report</Text>
</TouchableOpacity>
<TouchableOpacity onPress={() => setShowModal(false)} style={{ padding: 14, alignItems: 'center' }}>
<Text>Cancel</Text>
</TouchableOpacity>
</View>
</View>
</Modal>
</>
)
}3. Block Abusive Users
Use the blocklist API to let users block other users from contacting them.
Swift
/// Adds `blockedUserId` to `userId`'s blocklist via the Vettly API.
/// - Parameters:
///   - blockedUserId: The user being blocked.
///   - userId: The user performing the block.
/// - Throws: `BlockError.failed` unless the API answers 201 Created.
func blockUser(_ blockedUserId: String, by userId: String) async throws {
    var request = URLRequest(url: URL(string: "https://api.vettly.dev/v1/blocklist")!)
    request.httpMethod = "POST"
    request.setValue("Bearer \(apiKey)", forHTTPHeaderField: "Authorization")
    request.setValue("application/json", forHTTPHeaderField: "Content-Type")
    request.httpBody = try JSONSerialization.data(withJSONObject: [
        "userId": userId,
        "blockedUserId": blockedUserId
    ])
    let (_, response) = try await URLSession.shared.data(for: request)
    let status = (response as? HTTPURLResponse)?.statusCode
    guard status == 201 else { throw BlockError.failed }
}
React Native / Expo
/**
 * Blocks `blockedUserId` on behalf of the signed-in user.
 * Note: fetch() does NOT throw on HTTP error statuses, so the response
 * must be checked before confirming success to the user.
 */
async function blockUser(blockedUserId: string) {
  const res = await fetch('https://api.vettly.dev/v1/blocklist', {
    method: 'POST',
    headers: {
      'Authorization': `Bearer ${apiKey}`,
      'Content-Type': 'application/json'
    },
    body: JSON.stringify({
      userId: currentUser.id,
      blockedUserId
    })
  })
  // Only confirm once the API has actually accepted the block.
  if (!res.ok) {
    Alert.alert('Error', 'Could not block this user. Please try again.')
    return
  }
  Alert.alert('User Blocked', 'You will no longer see content from this user.')
}
4. Published Contact Information
This requirement is your responsibility as the app publisher. Apple requires:
- A way for users to contact you (email, website, or in-app support)
- Contact information must be accessible within the app
Recommendations:
- Add a "Contact Us" link in your app's Settings or About screen
- Include a support email in your App Store listing
- Link to a support page or help center
// Example: Settings screen with contact info
struct SettingsView: View {
    // Compile-time URL literals — force-unwrap is safe for known-valid constants.
    private let supportEmail = URL(string: "mailto:[email protected]")!
    private let helpCenter = URL(string: "https://yourapp.com/support")!

    var body: some View {
        List {
            Section("Support") {
                Link("Contact Us", destination: supportEmail)
                Link("Help Center", destination: helpCenter)
            }
        }
    }
}
Implementation Checklist
Content Filtering
- [ ] Integrate `vettly.check()` for text content
- [ ] Integrate `vettly.check()` for image uploads (if applicable)
- [ ] Apply the `app-store` policy template
- [ ] Handle `block` actions before content is posted
- [ ] Handle `flag` actions with human review queue
Reporting Mechanism
- [ ] Add a "Report" button on user-generated content
- [ ] Connect reports to the Vettly appeals API
- [ ] Show confirmation to the reporting user
- [ ] Set up notification for incoming reports
User Blocking
- [ ] Add a "Block User" option on user profiles or content
- [ ] Connect to the Vettly blocklist API
- [ ] Filter blocked users from feeds and messages
- [ ] Allow users to view and manage their block list
Contact Information
- [ ] Add contact information in the app (Settings/About screen)
- [ ] Include support email in App Store listing
- [ ] Verify contact information is accessible and working
Pre-built Policy
Vettly provides an app-store policy template designed for Guideline 1.2 compliance. See Moderation Policies for the full configuration.
// Use the pre-built App Store policy
const result = await vettly.check({
content: userMessage,
contentType: 'text',
policyId: 'app-store'
})Next Steps
- Swift Integration — Full Swift integration guide
- React Native Integration — React Native setup
- Expo Integration — Expo-specific guide
- Appeals Workflow — Configure reporting and appeals
- Audit Trails — Evidence and compliance records
- Moderation Policies — Policy templates and customization
