From 51fdfbf3d69c7ed8e91a45cd3bc1f3484c102c6b Mon Sep 17 00:00:00 2001
From: genki <123@1234.com>
Date: Thu, 8 Jan 2026 00:02:27 -0500
Subject: [PATCH] Improved Training Screen and underlying data layer
Added a diagnostic view model with a flag for picture detection, but broke several things while modifying TagDao. To be fixed tomorrow.
---
.idea/deploymentTargetSelector.xml | 8 +
.../sherpai2/data/local/AppDatabase.kt | 4 +-
.../sherpai2/data/local/dao/ImagePersonDao.kt | 25 -
.../sherpai2/data/local/dao/ImageTagDao.kt | 2 +
.../sherpai2/data/local/dao/TagDao.kt | 201 ++++++-
.../local/entity/Facerecognitionentities.kt | 83 ++-
.../data/local/entity/ImagePersonEntity.kt | 40 --
.../sherpai2/data/local/entity/TagEntity.kt | 109 +++-
.../data/local/model/ImageWithEverything.kt | 6 -
.../repository/Facerecognitionrepository.kt | 41 +-
.../Personinventoryviewmodel.kt | 142 ++---
.../ui/trainingprep/ImageSelectorScreen.kt | 376 ++++++++++---
.../ui/trainingprep/TrainViewModel.kt | 171 ++----
.../ui/trainingprep/TrainingScreen.kt | 515 +++++++++++++++++-
.../placeholder/sherpai2/util/DebugFlags.kt | 68 +++
15 files changed, 1411 insertions(+), 380 deletions(-)
delete mode 100644 app/src/main/java/com/placeholder/sherpai2/data/local/dao/ImagePersonDao.kt
delete mode 100644 app/src/main/java/com/placeholder/sherpai2/data/local/entity/ImagePersonEntity.kt
create mode 100644 app/src/main/java/com/placeholder/sherpai2/util/DebugFlags.kt
diff --git a/.idea/deploymentTargetSelector.xml b/.idea/deploymentTargetSelector.xml
index b268ef3..48cc23e 100644
--- a/.idea/deploymentTargetSelector.xml
+++ b/.idea/deploymentTargetSelector.xml
@@ -4,6 +4,14 @@
+
+
+
+
+
+
+
+
diff --git a/app/src/main/java/com/placeholder/sherpai2/data/local/AppDatabase.kt b/app/src/main/java/com/placeholder/sherpai2/data/local/AppDatabase.kt
index 6e42735..02c0a44 100644
--- a/app/src/main/java/com/placeholder/sherpai2/data/local/AppDatabase.kt
+++ b/app/src/main/java/com/placeholder/sherpai2/data/local/AppDatabase.kt
@@ -21,7 +21,6 @@ import com.placeholder.sherpai2.data.local.entity.*
TagEntity::class,
EventEntity::class,
ImageTagEntity::class,
- ImagePersonEntity::class,
ImageEventEntity::class,
// ===== NEW ENTITIES =====
@@ -29,7 +28,7 @@ import com.placeholder.sherpai2.data.local.entity.*
FaceModelEntity::class, // NEW: Face embeddings
PhotoFaceTagEntity::class // NEW: Face tags
],
- version = 3,
+ version = 4,
exportSchema = false
)
// No TypeConverters needed - embeddings stored as strings
@@ -40,7 +39,6 @@ abstract class AppDatabase : RoomDatabase() {
abstract fun tagDao(): TagDao
abstract fun eventDao(): EventDao
abstract fun imageTagDao(): ImageTagDao
- abstract fun imagePersonDao(): ImagePersonDao
abstract fun imageEventDao(): ImageEventDao
abstract fun imageAggregateDao(): ImageAggregateDao
diff --git a/app/src/main/java/com/placeholder/sherpai2/data/local/dao/ImagePersonDao.kt b/app/src/main/java/com/placeholder/sherpai2/data/local/dao/ImagePersonDao.kt
deleted file mode 100644
index 51e5903..0000000
--- a/app/src/main/java/com/placeholder/sherpai2/data/local/dao/ImagePersonDao.kt
+++ /dev/null
@@ -1,25 +0,0 @@
-package com.placeholder.sherpai2.data.local.dao
-
-import androidx.room.Dao
-import androidx.room.Insert
-import androidx.room.OnConflictStrategy
-import androidx.room.Query
-import com.placeholder.sherpai2.data.local.entity.ImagePersonEntity
-
-@Dao
-interface ImagePersonDao {
-
- @Insert(onConflict = OnConflictStrategy.REPLACE)
- suspend fun upsert(entity: ImagePersonEntity)
-
- /**
- * All images containing a specific person.
- */
- @Query("""
- SELECT imageId FROM image_persons
- WHERE personId = :personId
- AND visibility = 'PUBLIC'
- AND confirmed = 1
- """)
- suspend fun findImagesForPerson(personId: String): List
-}
diff --git a/app/src/main/java/com/placeholder/sherpai2/data/local/dao/ImageTagDao.kt b/app/src/main/java/com/placeholder/sherpai2/data/local/dao/ImageTagDao.kt
index 513e17a..f683005 100644
--- a/app/src/main/java/com/placeholder/sherpai2/data/local/dao/ImageTagDao.kt
+++ b/app/src/main/java/com/placeholder/sherpai2/data/local/dao/ImageTagDao.kt
@@ -50,4 +50,6 @@ interface ImageTagDao {
""")
fun getTagsForImage(imageId: String): Flow<List<TagEntity>>
+
+
}
diff --git a/app/src/main/java/com/placeholder/sherpai2/data/local/dao/TagDao.kt b/app/src/main/java/com/placeholder/sherpai2/data/local/dao/TagDao.kt
index dec14cb..f5c8a07 100644
--- a/app/src/main/java/com/placeholder/sherpai2/data/local/dao/TagDao.kt
+++ b/app/src/main/java/com/placeholder/sherpai2/data/local/dao/TagDao.kt
@@ -4,21 +4,206 @@ import androidx.room.Dao
import androidx.room.Insert
import androidx.room.OnConflictStrategy
import androidx.room.Query
+import com.placeholder.sherpai2.data.local.entity.ImageEntity
import com.placeholder.sherpai2.data.local.entity.TagEntity
+import com.placeholder.sherpai2.data.local.entity.TagWithUsage
+import kotlinx.coroutines.flow.Flow
+/**
+ * TagDao - Tag management with face recognition integration
+ */
@Dao
interface TagDao {
- @Insert(onConflict = OnConflictStrategy.IGNORE)
- suspend fun insert(tag: TagEntity)
+ // ======================
+ // BASIC OPERATIONS
+ // ======================
+
+ @Insert(onConflict = OnConflictStrategy.IGNORE)
+ suspend fun insert(tag: TagEntity): Long
- /**
- * Resolve a tag by value.
- * Example: "park"
- */
@Query("SELECT * FROM tags WHERE value = :value LIMIT 1")
suspend fun getByValue(value: String): TagEntity?
- @Query("SELECT * FROM tags")
+ @Query("SELECT * FROM tags WHERE tagId = :tagId")
+ suspend fun getById(tagId: String): TagEntity?
+
+ @Query("SELECT * FROM tags ORDER BY value ASC")
suspend fun getAll(): List<TagEntity>
-}
+
+ @Query("SELECT * FROM tags ORDER BY value ASC")
+ fun getAllFlow(): Flow<List<TagEntity>>
+
+ @Query("SELECT * FROM tags WHERE type = :type ORDER BY value ASC")
+ suspend fun getByType(type: String): List<TagEntity>
+
+ @Query("DELETE FROM tags WHERE tagId = :tagId")
+ suspend fun delete(tagId: String)
+
+ // ======================
+ // STATISTICS (returns TagWithUsage)
+ // ======================
+
+ /**
+ * Get most used tags WITH usage counts
+ */
+ @Query("""
+ SELECT t.tagId, t.type, t.value, t.createdAt,
+ COUNT(it.imageId) as usage_count
+ FROM tags t
+ LEFT JOIN image_tags it ON t.tagId = it.tagId
+ GROUP BY t.tagId
+ ORDER BY usage_count DESC
+ LIMIT :limit
+ """)
+ suspend fun getMostUsedTags(limit: Int = 10): List<TagWithUsage>
+
+ /**
+ * Get tag usage count
+ */
+ @Query("""
+ SELECT COUNT(DISTINCT it.imageId)
+ FROM image_tags it
+ WHERE it.tagId = :tagId
+ """)
+ suspend fun getTagUsageCount(tagId: String): Int
+
+ // ======================
+ // PERSON INTEGRATION
+ // ======================
+
+ /**
+ * Get all tags used for images containing a specific person
+ */
+ @Query("""
+ SELECT DISTINCT t.* FROM tags t
+ INNER JOIN image_tags it ON t.tagId = it.tagId
+ INNER JOIN photo_face_tags pft ON it.imageId = pft.imageId
+ INNER JOIN face_models fm ON pft.faceModelId = fm.id
+ WHERE fm.personId = :personId
+ ORDER BY t.value ASC
+ """)
+ suspend fun getTagsForPerson(personId: String): List<TagEntity>
+
+ /**
+ * Get images that have both a specific tag AND contain a specific person
+ */
+ @Query("""
+ SELECT DISTINCT i.* FROM images i
+ INNER JOIN image_tags it ON i.imageId = it.imageId
+ INNER JOIN photo_face_tags pft ON i.imageId = pft.imageId
+ INNER JOIN face_models fm ON pft.faceModelId = fm.id
+ WHERE it.tagId = :tagId AND fm.personId = :personId
+ ORDER BY i.capturedAt DESC
+ """)
+ suspend fun getImagesWithTagAndPerson(
+ tagId: String,
+ personId: String
+ ): List<ImageEntity>
+
+ /**
+ * Get images with tag and person as Flow
+ */
+ @Query("""
+ SELECT DISTINCT i.* FROM images i
+ INNER JOIN image_tags it ON i.imageId = it.imageId
+ INNER JOIN photo_face_tags pft ON i.imageId = pft.imageId
+ INNER JOIN face_models fm ON pft.faceModelId = fm.id
+ WHERE it.tagId = :tagId AND fm.personId = :personId
+ ORDER BY i.capturedAt DESC
+ """)
+ fun getImagesWithTagAndPersonFlow(
+ tagId: String,
+ personId: String
+ ): Flow<List<ImageEntity>>
+
+ /**
+ * Count images with tag and person
+ */
+ @Query("""
+ SELECT COUNT(DISTINCT i.imageId) FROM images i
+ INNER JOIN image_tags it ON i.imageId = it.imageId
+ INNER JOIN photo_face_tags pft ON i.imageId = pft.imageId
+ INNER JOIN face_models fm ON pft.faceModelId = fm.id
+ WHERE it.tagId = :tagId AND fm.personId = :personId
+ """)
+ suspend fun countImagesWithTagAndPerson(
+ tagId: String,
+ personId: String
+ ): Int
+
+ // ======================
+ // AUTO-SUGGESTIONS
+ // ======================
+
+ /**
+ * Suggest tags based on person's relationship
+ */
+ @Query("""
+ SELECT DISTINCT t.* FROM tags t
+ INNER JOIN image_tags it ON t.tagId = it.tagId
+ INNER JOIN photo_face_tags pft ON it.imageId = pft.imageId
+ INNER JOIN face_models fm ON pft.faceModelId = fm.id
+ INNER JOIN persons p ON fm.personId = p.id
+ WHERE p.relationship = :relationship
+ AND p.id != :excludePersonId
+ GROUP BY t.tagId
+ ORDER BY COUNT(it.imageId) DESC
+ LIMIT :limit
+ """)
+ suspend fun suggestTagsBasedOnRelationship(
+ relationship: String,
+ excludePersonId: String,
+ limit: Int = 5
+ ): List<TagEntity>
+
+ /**
+ * Get tags commonly used with this tag
+ */
+ @Query("""
+ SELECT DISTINCT t2.* FROM tags t2
+ INNER JOIN image_tags it2 ON t2.tagId = it2.tagId
+ WHERE it2.imageId IN (
+ SELECT it1.imageId FROM image_tags it1
+ WHERE it1.tagId = :tagId
+ )
+ AND t2.tagId != :tagId
+ GROUP BY t2.tagId
+ ORDER BY COUNT(it2.imageId) DESC
+ LIMIT :limit
+ """)
+ suspend fun getRelatedTags(
+ tagId: String,
+ limit: Int = 5
+ ): List<TagEntity>
+
+ // ======================
+ // SEARCH
+ // ======================
+
+ /**
+ * Search tags by value (partial match)
+ */
+ @Query("""
+ SELECT * FROM tags
+ WHERE value LIKE '%' || :query || '%'
+ ORDER BY value ASC
+ LIMIT :limit
+ """)
+ suspend fun searchTags(query: String, limit: Int = 20): List<TagEntity>
+
+ /**
+ * Search tags with usage count
+ */
+ @Query("""
+ SELECT t.tagId, t.type, t.value, t.createdAt,
+ COUNT(it.imageId) as usage_count
+ FROM tags t
+ LEFT JOIN image_tags it ON t.tagId = it.tagId
+ WHERE t.value LIKE '%' || :query || '%'
+ GROUP BY t.tagId
+ ORDER BY usage_count DESC, t.value ASC
+ LIMIT :limit
+ """)
+ suspend fun searchTagsWithUsage(query: String, limit: Int = 20): List<TagWithUsage>
+}
\ No newline at end of file
diff --git a/app/src/main/java/com/placeholder/sherpai2/data/local/entity/Facerecognitionentities.kt b/app/src/main/java/com/placeholder/sherpai2/data/local/entity/Facerecognitionentities.kt
index 2f99432..a917b59 100644
--- a/app/src/main/java/com/placeholder/sherpai2/data/local/entity/Facerecognitionentities.kt
+++ b/app/src/main/java/com/placeholder/sherpai2/data/local/entity/Facerecognitionentities.kt
@@ -18,15 +18,92 @@ import java.util.UUID
Index(value = ["name"])
]
)
+/**
+* PersonEntity - Represents a person in your app
+*
+* CLEAN DESIGN:
+* - Uses String UUID for id (matches your ImageEntity.imageId pattern)
+* - Face embeddings stored separately in FaceModelEntity
+* - Simple, extensible schema
+* - Now includes DOB and relationship for better organization
+*/
data class PersonEntity(
@PrimaryKey
val id: String = UUID.randomUUID().toString(),
+ /**
+ * Person's name (required)
+ */
val name: String,
- val createdAt: Long = System.currentTimeMillis(),
- val updatedAt: Long = System.currentTimeMillis()
-)
+ /**
+ * Date of birth (optional)
+ * Stored as Unix timestamp (milliseconds)
+ */
+ val dateOfBirth: Long? = null,
+
+ /**
+ * Relationship to user (optional)
+ * Examples: "Family", "Friend", "Partner", "Child", "Parent", "Sibling", "Colleague", "Other"
+ */
+ val relationship: String? = null,
+
+ /**
+ * When this person was added
+ */
+ val createdAt: Long = System.currentTimeMillis(),
+
+ /**
+ * Last time this person's data was updated
+ */
+ val updatedAt: Long = System.currentTimeMillis()
+) {
+ companion object {
+ /**
+ * Create PersonEntity with optional fields
+ */
+ fun create(
+ name: String,
+ dateOfBirth: Long? = null,
+ relationship: String? = null
+ ): PersonEntity {
+ return PersonEntity(
+ name = name,
+ dateOfBirth = dateOfBirth,
+ relationship = relationship
+ )
+ }
+ }
+
+ /**
+ * Calculate age if date of birth is available
+ */
+ fun getAge(): Int? {
+ if (dateOfBirth == null) return null
+
+ val now = System.currentTimeMillis()
+ val ageInMillis = now - dateOfBirth
+ val ageInYears = ageInMillis / (1000L * 60 * 60 * 24 * 365)
+
+ return ageInYears.toInt()
+ }
+
+ /**
+ * Get relationship emoji
+ */
+ fun getRelationshipEmoji(): String {
+ return when (relationship) {
+ "Family" -> "👨‍👩‍👧‍👦"
+ "Friend" -> "🤝"
+ "Partner" -> "❤️"
+ "Child" -> "👶"
+ "Parent" -> "👪"
+ "Sibling" -> "👫"
+ "Colleague" -> "💼"
+ else -> "👤"
+ }
+ }
+}
/**
* FaceModelEntity - Stores face recognition model (embedding) for a person
*
diff --git a/app/src/main/java/com/placeholder/sherpai2/data/local/entity/ImagePersonEntity.kt b/app/src/main/java/com/placeholder/sherpai2/data/local/entity/ImagePersonEntity.kt
deleted file mode 100644
index f1b3142..0000000
--- a/app/src/main/java/com/placeholder/sherpai2/data/local/entity/ImagePersonEntity.kt
+++ /dev/null
@@ -1,40 +0,0 @@
-package com.placeholder.sherpai2.data.local.entity
-
-import androidx.room.Entity
-import androidx.room.ForeignKey
-import androidx.room.Index
-
-@Entity(
- tableName = "image_persons",
- primaryKeys = ["imageId", "personId"],
- foreignKeys = [
- ForeignKey(
- entity = ImageEntity::class,
- parentColumns = ["imageId"],
- childColumns = ["imageId"],
- onDelete = ForeignKey.CASCADE
- ),
- ForeignKey(
- entity = PersonEntity::class,
- parentColumns = ["id"],
- childColumns = ["personId"],
- onDelete = ForeignKey.CASCADE
- )
- ],
- indices = [
- Index("personId")
- ]
-)
-data class ImagePersonEntity(
-
- val imageId: String,
- val personId: String,
-
- val confidence: Float,
- val confirmed: Boolean,
-
- /**
- * PUBLIC | PRIVATE
- */
- val visibility: String
-)
diff --git a/app/src/main/java/com/placeholder/sherpai2/data/local/entity/TagEntity.kt b/app/src/main/java/com/placeholder/sherpai2/data/local/entity/TagEntity.kt
index 00b7ebc..fde24f0 100644
--- a/app/src/main/java/com/placeholder/sherpai2/data/local/entity/TagEntity.kt
+++ b/app/src/main/java/com/placeholder/sherpai2/data/local/entity/TagEntity.kt
@@ -1,30 +1,119 @@
package com.placeholder.sherpai2.data.local.entity
+import androidx.room.ColumnInfo
import androidx.room.Entity
import androidx.room.PrimaryKey
+import java.util.UUID
/**
- * Represents a conceptual tag.
+ * TagEntity - Normalized tag storage
*
- * Tags are normalized so that:
- * - "park" exists once
- * - many images can reference it
+ * DESIGN:
+ * - Tags exist once (e.g., "vacation")
+ * - Multiple images reference via ImageTagEntity junction table
+ * - Type system: GENERIC | SYSTEM | HIDDEN
*/
@Entity(tableName = "tags")
data class TagEntity(
@PrimaryKey
- val tagId: String,
+ val tagId: String = UUID.randomUUID().toString(),
/**
- * GENERIC | SYSTEM | HIDDEN
+ * Tag type: GENERIC | SYSTEM | HIDDEN
*/
- val type: String,
+ val type: String = TagType.GENERIC,
/**
- * Human-readable value, e.g. "park", "sunset"
+ * Human-readable value, e.g. "vacation", "beach"
*/
val value: String,
- val createdAt: Long
-)
+ /**
+ * When tag was created
+ */
+ val createdAt: Long = System.currentTimeMillis()
+) {
+ companion object {
+ fun createUserTag(value: String): TagEntity {
+ return TagEntity(
+ type = TagType.GENERIC,
+ value = value.trim().lowercase()
+ )
+ }
+
+ fun createSystemTag(value: String): TagEntity {
+ return TagEntity(
+ type = TagType.SYSTEM,
+ value = value.trim().lowercase()
+ )
+ }
+
+ fun createHiddenTag(value: String): TagEntity {
+ return TagEntity(
+ type = TagType.HIDDEN,
+ value = value.trim().lowercase()
+ )
+ }
+ }
+
+ fun isUserTag(): Boolean = type == TagType.GENERIC
+ fun isSystemTag(): Boolean = type == TagType.SYSTEM
+ fun isHiddenTag(): Boolean = type == TagType.HIDDEN
+ fun getDisplayValue(): String = value.replaceFirstChar { it.uppercase() }
+}
+
+/**
+ * TagWithUsage - For queries that include usage count
+ *
+ * Use this for statistics queries
+ */
+data class TagWithUsage(
+ @ColumnInfo(name = "tagId")
+ val tagId: String,
+
+ @ColumnInfo(name = "type")
+ val type: String,
+
+ @ColumnInfo(name = "value")
+ val value: String,
+
+ @ColumnInfo(name = "createdAt")
+ val createdAt: Long,
+
+ @ColumnInfo(name = "usage_count")
+ val usageCount: Int
+) {
+ /**
+ * Convert to TagEntity (without usage count)
+ */
+ fun toTagEntity(): TagEntity {
+ return TagEntity(
+ tagId = tagId,
+ type = type,
+ value = value,
+ createdAt = createdAt
+ )
+ }
+}
+
+/**
+ * Tag type constants
+ */
+object TagType {
+ const val GENERIC = "GENERIC"
+ const val SYSTEM = "SYSTEM"
+ const val HIDDEN = "HIDDEN"
+}
+
+/**
+ * Common system tag values
+ */
+object SystemTags {
+ const val HAS_FACES = "has_faces"
+ const val MULTIPLE_PEOPLE = "multiple_people"
+ const val LANDSCAPE = "landscape"
+ const val PORTRAIT = "portrait"
+ const val LOW_QUALITY = "low_quality"
+ const val BLURRY = "blurry"
+}
\ No newline at end of file
diff --git a/app/src/main/java/com/placeholder/sherpai2/data/local/model/ImageWithEverything.kt b/app/src/main/java/com/placeholder/sherpai2/data/local/model/ImageWithEverything.kt
index ecbd352..2a81fde 100644
--- a/app/src/main/java/com/placeholder/sherpai2/data/local/model/ImageWithEverything.kt
+++ b/app/src/main/java/com/placeholder/sherpai2/data/local/model/ImageWithEverything.kt
@@ -15,12 +15,6 @@ data class ImageWithEverything(
)
val tags: List,
- @Relation(
- parentColumn = "imageId",
- entityColumn = "imageId"
- )
- val persons: List,
-
@Relation(
parentColumn = "imageId",
entityColumn = "imageId"
diff --git a/app/src/main/java/com/placeholder/sherpai2/data/repository/Facerecognitionrepository.kt b/app/src/main/java/com/placeholder/sherpai2/data/repository/Facerecognitionrepository.kt
index a50ccf0..02123ff 100644
--- a/app/src/main/java/com/placeholder/sherpai2/data/repository/Facerecognitionrepository.kt
+++ b/app/src/main/java/com/placeholder/sherpai2/data/repository/Facerecognitionrepository.kt
@@ -332,6 +332,42 @@ class FaceRecognitionRepository @Inject constructor(
faceModelDao.deleteFaceModelById(faceModelId)
}
+ // Add this method to FaceRecognitionRepository_StringIds.kt
+// Replace the existing createPersonWithFaceModel method with this version:
+
+ /**
+ * Create a new person with face model in one operation.
+ * Now supports full PersonEntity with optional fields.
+ *
+ * @param person PersonEntity with name, DOB, relationship, etc.
+ * @return PersonId (String UUID)
+ */
+ suspend fun createPersonWithFaceModel(
+ person: PersonEntity,
+ validImages: List<Uri>,
+ onProgress: (Int, Int) -> Unit = { _, _ -> }
+ ): String = withContext(Dispatchers.IO) {
+
+ // Insert person with all fields
+ personDao.insert(person)
+
+ // Train face model
+ trainPerson(
+ personId = person.id,
+ validImages = validImages,
+ onProgress = onProgress
+ )
+
+ person.id
+ }
+
+ /**
+ * Get face model by ID
+ */
+ suspend fun getFaceModelById(faceModelId: String): FaceModelEntity? = withContext(Dispatchers.IO) {
+ faceModelDao.getFaceModelById(faceModelId)
+ }
+
suspend fun deleteTagsForImage(imageId: String) {
photoFaceTagDao.deleteTagsForImage(imageId)
}
@@ -339,6 +375,8 @@ class FaceRecognitionRepository @Inject constructor(
fun cleanup() {
faceNetModel.close()
}
+
+
}
data class DetectedFace(
@@ -354,4 +392,5 @@ data class PersonFaceStats(
val taggedPhotoCount: Int,
val averageConfidence: Float,
val lastDetectedAt: Long?
-)
\ No newline at end of file
+)
+
diff --git a/app/src/main/java/com/placeholder/sherpai2/ui/modelinventory/Personinventoryviewmodel.kt b/app/src/main/java/com/placeholder/sherpai2/ui/modelinventory/Personinventoryviewmodel.kt
index 1753852..e1305b2 100644
--- a/app/src/main/java/com/placeholder/sherpai2/ui/modelinventory/Personinventoryviewmodel.kt
+++ b/app/src/main/java/com/placeholder/sherpai2/ui/modelinventory/Personinventoryviewmodel.kt
@@ -14,6 +14,8 @@ import com.placeholder.sherpai2.data.repository.DetectedFace
import com.placeholder.sherpai2.data.repository.FaceRecognitionRepository
import com.placeholder.sherpai2.data.repository.PersonFaceStats
import com.placeholder.sherpai2.domain.repository.ImageRepository
+import com.placeholder.sherpai2.util.DebugFlags
+import com.placeholder.sherpai2.util.DiagnosticLogger
import dagger.hilt.android.lifecycle.HiltViewModel
import kotlinx.coroutines.Dispatchers
import kotlinx.coroutines.delay
@@ -27,13 +29,11 @@ import kotlinx.coroutines.tasks.await
import javax.inject.Inject
/**
- * PersonInventoryViewModel - Manage trained face models
+ * PersonInventoryViewModel - Single version with feature flags
*
- * Features:
- * - List all trained persons with stats
- * - Delete models
- * - SCAN LIBRARY to find person in all photos
- * - View sample photos
+ * Toggle diagnostics in DebugFlags.kt:
+ * - ENABLE_FACE_RECOGNITION_LOGGING = true/false
+ * - USE_LIBERAL_THRESHOLDS = true/false
*/
@HiltViewModel
class PersonInventoryViewModel @Inject constructor(
@@ -48,13 +48,12 @@ class PersonInventoryViewModel @Inject constructor(
private val _scanningState = MutableStateFlow(ScanningState.Idle)
val scanningState: StateFlow<ScanningState> = _scanningState.asStateFlow()
- // ML Kit face detector
private val faceDetector by lazy {
val options = FaceDetectorOptions.Builder()
.setPerformanceMode(FaceDetectorOptions.PERFORMANCE_MODE_ACCURATE)
.setLandmarkMode(FaceDetectorOptions.LANDMARK_MODE_NONE)
.setClassificationMode(FaceDetectorOptions.CLASSIFICATION_MODE_NONE)
- .setMinFaceSize(0.15f)
+ .setMinFaceSize(0.10f) // Lower for better detection
.build()
FaceDetection.getClient(options)
}
@@ -77,12 +76,14 @@ class PersonInventoryViewModel @Inject constructor(
val personName: String,
val progress: Int,
val total: Int,
- val facesFound: Int
+ val facesFound: Int,
+ val facesDetected: Int = 0
) : ScanningState()
data class Complete(
val personName: String,
val facesFound: Int,
- val imagesScanned: Int
+ val imagesScanned: Int,
+ val totalFacesDetected: Int = 0
) : ScanningState()
}
@@ -90,9 +91,6 @@ class PersonInventoryViewModel @Inject constructor(
loadPersons()
}
- /**
- * Load all trained persons with their stats
- */
fun loadPersons() {
viewModelScope.launch {
try {
@@ -119,14 +117,11 @@ class PersonInventoryViewModel @Inject constructor(
}
}
- /**
- * Delete a face model
- */
fun deletePerson(personId: String, faceModelId: String) {
viewModelScope.launch {
try {
faceRecognitionRepository.deleteFaceModel(faceModelId)
- loadPersons() // Refresh list
+ loadPersons()
} catch (e: Exception) {
_uiState.value = InventoryUiState.Error(
"Failed to delete: ${e.message}"
@@ -136,21 +131,17 @@ class PersonInventoryViewModel @Inject constructor(
}
/**
- * Scan entire photo library for a specific person
- *
- * Process:
- * 1. Get all images from library
- * 2. For each image:
- * - Detect faces using ML Kit
- * - Generate embeddings for detected faces
- * - Compare to person's face model
- * - Create PhotoFaceTagEntity if match found
- * 3. Update progress throughout
+ * Scan library with optional diagnostic logging
*/
fun scanLibraryForPerson(personId: String, faceModelId: String) {
viewModelScope.launch {
try {
- // Get person name for UI
+ if (DebugFlags.ENABLE_FACE_RECOGNITION_LOGGING) {
+ DiagnosticLogger.i("=== STARTING LIBRARY SCAN ===")
+ DiagnosticLogger.i("PersonId: $personId")
+ DiagnosticLogger.i("FaceModelId: $faceModelId")
+ }
+
val currentState = _uiState.value
val person = if (currentState is InventoryUiState.Success) {
currentState.persons.find { it.person.id == personId }?.person
@@ -158,69 +149,104 @@ class PersonInventoryViewModel @Inject constructor(
val personName = person?.name ?: "Unknown"
- // Get all images from library
+ // Get face model to determine training count
+ val faceModel = faceRecognitionRepository.getFaceModelById(faceModelId)
+ val trainingCount = faceModel?.trainingImageCount ?: 15
+
+ // Dynamic threshold based on training data and debug flag
+ val scanThreshold = if (DebugFlags.USE_LIBERAL_THRESHOLDS) {
+ when {
+ trainingCount < 20 -> 0.48f // Very liberal
+ trainingCount < 30 -> 0.52f // Liberal
+ else -> 0.58f // Moderate
+ }
+ } else {
+ when {
+ trainingCount < 20 -> 0.55f // Moderate
+ trainingCount < 30 -> 0.60f // Conservative
+ else -> 0.65f // Strict
+ }
+ }
+
+ DiagnosticLogger.i("Training count: $trainingCount")
+ DiagnosticLogger.i("Using threshold: $scanThreshold")
+
val allImages = imageRepository.getAllImages().first()
val totalImages = allImages.size
+ DiagnosticLogger.i("Total images in library: $totalImages")
+
_scanningState.value = ScanningState.Scanning(
personId = personId,
personName = personName,
progress = 0,
total = totalImages,
- facesFound = 0
+ facesFound = 0,
+ facesDetected = 0
)
var facesFound = 0
+ var totalFacesDetected = 0
- // Scan each image
allImages.forEachIndexed { index, imageWithEverything ->
val image = imageWithEverything.image
- // Detect faces in this image
+ DiagnosticLogger.d("--- Image ${index + 1}/$totalImages ---")
+ DiagnosticLogger.d("ImageId: ${image.imageId}")
+
val detectedFaces = detectFacesInImage(image.imageUri)
+ totalFacesDetected += detectedFaces.size
+
+ DiagnosticLogger.d("Faces detected: ${detectedFaces.size}")
if (detectedFaces.isNotEmpty()) {
- // Scan this image for the person
val tags = faceRecognitionRepository.scanImage(
imageId = image.imageId,
detectedFaces = detectedFaces,
- threshold = 0.6f // Slightly lower threshold for library scanning
+ threshold = scanThreshold
)
- // Count how many faces matched this person
- val matchingTags = tags.filter { tag ->
- // Check if this tag belongs to our target person's face model
- tag.faceModelId == faceModelId
+ DiagnosticLogger.d("Tags created: ${tags.size}")
+
+ tags.forEach { tag ->
+ DiagnosticLogger.d(" Tag: model=${tag.faceModelId.take(8)}, conf=${String.format("%.3f", tag.confidence)}")
}
+ val matchingTags = tags.filter { it.faceModelId == faceModelId }
+ DiagnosticLogger.d("Matching tags for target: ${matchingTags.size}")
+
facesFound += matchingTags.size
}
- // Update progress
_scanningState.value = ScanningState.Scanning(
personId = personId,
personName = personName,
progress = index + 1,
total = totalImages,
- facesFound = facesFound
+ facesFound = facesFound,
+ facesDetected = totalFacesDetected
)
}
- // Scan complete
+ DiagnosticLogger.i("=== SCAN COMPLETE ===")
+ DiagnosticLogger.i("Images scanned: $totalImages")
+ DiagnosticLogger.i("Faces detected: $totalFacesDetected")
+ DiagnosticLogger.i("Faces matched: $facesFound")
+ DiagnosticLogger.i("Hit rate: ${if (totalFacesDetected > 0) (facesFound * 100 / totalFacesDetected) else 0}%")
+
_scanningState.value = ScanningState.Complete(
personName = personName,
facesFound = facesFound,
- imagesScanned = totalImages
+ imagesScanned = totalImages,
+ totalFacesDetected = totalFacesDetected
)
- // Refresh the list to show updated counts
loadPersons()
-
- // Reset scanning state after 3 seconds
delay(3000)
_scanningState.value = ScanningState.Idle
} catch (e: Exception) {
+ DiagnosticLogger.e("Scan failed", e)
_scanningState.value = ScanningState.Idle
_uiState.value = InventoryUiState.Error(
"Scan failed: ${e.message}"
@@ -229,33 +255,28 @@ class PersonInventoryViewModel @Inject constructor(
}
}
- /**
- * Detect faces in an image using ML Kit
- *
- * @param imageUri URI of the image to scan
- * @return List of detected faces with cropped bitmaps
- */
private suspend fun detectFacesInImage(imageUri: String): List<DetectedFace> = withContext(Dispatchers.Default) {
try {
- // Load bitmap from URI
val uri = Uri.parse(imageUri)
val inputStream = getApplication().contentResolver.openInputStream(uri)
val bitmap = BitmapFactory.decodeStream(inputStream)
inputStream?.close()
- if (bitmap == null) return@withContext emptyList()
+ if (bitmap == null) {
+ DiagnosticLogger.w("Failed to load bitmap from: $imageUri")
+ return@withContext emptyList()
+ }
+
+ DiagnosticLogger.d("Bitmap: ${bitmap.width}x${bitmap.height}")
- // Create ML Kit input image
val image = InputImage.fromBitmap(bitmap, 0)
-
- // Detect faces (await the Task)
val faces = faceDetector.process(image).await()
- // Convert to DetectedFace objects
+ DiagnosticLogger.d("ML Kit found ${faces.size} faces")
+
faces.mapNotNull { face ->
val boundingBox = face.boundingBox
- // Crop face from bitmap with bounds checking
val croppedFace = try {
val left = boundingBox.left.coerceAtLeast(0)
val top = boundingBox.top.coerceAtLeast(0)
@@ -268,6 +289,7 @@ class PersonInventoryViewModel @Inject constructor(
null
}
} catch (e: Exception) {
+ DiagnosticLogger.e("Face crop failed", e)
null
}
@@ -282,13 +304,11 @@ class PersonInventoryViewModel @Inject constructor(
}
} catch (e: Exception) {
+ DiagnosticLogger.e("Face detection failed: $imageUri", e)
emptyList()
}
}
- /**
- * Get sample images for a person
- */
suspend fun getPersonImages(personId: String) =
faceRecognitionRepository.getImagesForPerson(personId)
diff --git a/app/src/main/java/com/placeholder/sherpai2/ui/trainingprep/ImageSelectorScreen.kt b/app/src/main/java/com/placeholder/sherpai2/ui/trainingprep/ImageSelectorScreen.kt
index 6384f64..5a626e1 100644
--- a/app/src/main/java/com/placeholder/sherpai2/ui/trainingprep/ImageSelectorScreen.kt
+++ b/app/src/main/java/com/placeholder/sherpai2/ui/trainingprep/ImageSelectorScreen.kt
@@ -3,128 +3,338 @@ package com.placeholder.sherpai2.ui.trainingprep
import android.net.Uri
import androidx.activity.compose.rememberLauncherForActivityResult
import androidx.activity.result.contract.ActivityResultContracts
+import androidx.compose.animation.AnimatedVisibility
+import androidx.compose.foundation.background
import androidx.compose.foundation.layout.*
-
import androidx.compose.foundation.lazy.grid.GridCells
import androidx.compose.foundation.lazy.grid.LazyVerticalGrid
-
-import androidx.compose.foundation.shape.CircleShape
+import androidx.compose.foundation.lazy.grid.items
import androidx.compose.foundation.shape.RoundedCornerShape
import androidx.compose.material.icons.Icons
-import androidx.compose.material.icons.filled.AddPhotoAlternate
-import androidx.compose.material.icons.filled.Close
+import androidx.compose.material.icons.filled.*
import androidx.compose.material3.*
import androidx.compose.runtime.*
import androidx.compose.ui.Alignment
import androidx.compose.ui.Modifier
-import androidx.compose.ui.layout.ContentScale
+import androidx.compose.ui.graphics.Brush
+import androidx.compose.ui.graphics.Color
+import androidx.compose.ui.text.font.FontWeight
import androidx.compose.ui.unit.dp
-import androidx.compose.material3.Text
-import androidx.compose.runtime.saveable.rememberSaveable
-import androidx.compose.ui.draw.clip
-import androidx.compose.ui.platform.LocalContext
-import coil.compose.AsyncImage
-import androidx.compose.foundation.lazy.grid.items
-
+/**
+ * Enhanced ImageSelectorScreen
+ *
+ * Changes:
+ * - NO LIMIT on photo count (was 10)
+ * - Recommends 20-30 photos
+ * - Real-time progress feedback
+ * - Quality indicators
+ * - Training tips
+ */
@OptIn(ExperimentalMaterial3Api::class)
@Composable
fun ImageSelectorScreen(
    onImagesSelected: (List<Uri>) -> Unit
) {
- //1. Persist state across configuration changes
-    var selectedUris by rememberSaveable { mutableStateOf<List<Uri>>(emptyList()) }
- val context = LocalContext.current
+    var selectedImages by remember { mutableStateOf<List<Uri>>(emptyList()) }
- val launcher = rememberLauncherForActivityResult(
- ActivityResultContracts.OpenMultipleDocuments()
+ val photoPicker = rememberLauncherForActivityResult(
+ contract = ActivityResultContracts.GetMultipleContents()
) { uris ->
- // 2. Take first 10 and try to persist permissions
- val limitedUris = uris.take(10)
- selectedUris = limitedUris
+ if (uris.isNotEmpty()) {
+ selectedImages = uris
+ }
}
Scaffold(
- topBar = { TopAppBar(title = { Text("Select Training Photos") }) }
- ) { padding ->
+ topBar = {
+ TopAppBar(
+ title = { Text("Select Training Photos") },
+ colors = TopAppBarDefaults.topAppBarColors(
+ containerColor = MaterialTheme.colorScheme.primaryContainer
+ )
+ )
+ }
+ ) { paddingValues ->
Column(
modifier = Modifier
- .padding(padding)
- .padding(16.dp)
- .fillMaxSize(),
+ .fillMaxSize()
+ .padding(paddingValues)
+ .padding(16.dp),
verticalArrangement = Arrangement.spacedBy(16.dp)
) {
- OutlinedCard(
- onClick = { launcher.launch(arrayOf("image/*")) },
- modifier = Modifier.fillMaxWidth()
+
+ // Gradient header with tips
+ Card(
+ modifier = Modifier.fillMaxWidth(),
+ colors = CardDefaults.cardColors(
+ containerColor = MaterialTheme.colorScheme.primaryContainer
+ ),
+ shape = RoundedCornerShape(16.dp)
) {
Column(
- modifier = Modifier.padding(24.dp),
- horizontalAlignment = Alignment.CenterHorizontally
+ modifier = Modifier.padding(20.dp),
+ verticalArrangement = Arrangement.spacedBy(12.dp)
) {
- Icon(Icons.Default.AddPhotoAlternate, contentDescription = null)
- Spacer(Modifier.height(8.dp))
- Text("Select up to 10 images of the person")
+ Row(
+ horizontalArrangement = Arrangement.spacedBy(12.dp),
+ verticalAlignment = Alignment.CenterVertically
+ ) {
+ Surface(
+ shape = RoundedCornerShape(12.dp),
+ color = MaterialTheme.colorScheme.primary,
+ modifier = Modifier.size(48.dp)
+ ) {
+ Box(contentAlignment = Alignment.Center) {
+ Icon(
+ Icons.Default.PhotoCamera,
+ contentDescription = null,
+ tint = MaterialTheme.colorScheme.onPrimary,
+ modifier = Modifier.size(28.dp)
+ )
+ }
+ }
+
+ Column {
+ Text(
+ "Training Tips",
+ style = MaterialTheme.typography.titleLarge,
+ fontWeight = FontWeight.Bold
+ )
+ Text(
+ "More photos = better recognition",
+ style = MaterialTheme.typography.bodyMedium,
+ color = MaterialTheme.colorScheme.onPrimaryContainer.copy(alpha = 0.7f)
+ )
+ }
+ }
+
+ Spacer(Modifier.height(4.dp))
+
+                TipItem("✓ Select 20-30 photos for best results", true)
+                TipItem("✓ Include different angles and lighting", true)
+                TipItem("✓ Mix expressions (smile, neutral, laugh)", true)
+                TipItem("✓ With/without glasses if applicable", true)
+                TipItem("✗ Avoid blurry or very dark photos", false)
+ }
+ }
+
+ // Progress indicator
+ AnimatedVisibility(selectedImages.isNotEmpty()) {
+ ProgressCard(selectedImages.size)
+ }
+
+ Spacer(Modifier.weight(1f))
+
+ // Select photos button
+ Button(
+ onClick = { photoPicker.launch("image/*") },
+ modifier = Modifier.fillMaxWidth(),
+ colors = ButtonDefaults.buttonColors(
+ containerColor = MaterialTheme.colorScheme.primary
+ ),
+ contentPadding = PaddingValues(vertical = 16.dp)
+ ) {
+ Icon(Icons.Default.PhotoLibrary, contentDescription = null)
+ Spacer(Modifier.width(8.dp))
+ Text(
+ if (selectedImages.isEmpty()) {
+ "Select Training Photos"
+ } else {
+ "Selected: ${selectedImages.size} photos - Tap to change"
+ },
+ style = MaterialTheme.typography.titleMedium
+ )
+ }
+
+ // Continue button
+ AnimatedVisibility(selectedImages.size >= 15) {
+ Button(
+ onClick = { onImagesSelected(selectedImages) },
+ modifier = Modifier.fillMaxWidth(),
+ colors = ButtonDefaults.buttonColors(
+ containerColor = MaterialTheme.colorScheme.secondary
+ ),
+ contentPadding = PaddingValues(vertical = 16.dp)
+ ) {
+ Icon(Icons.Default.Check, contentDescription = null)
+ Spacer(Modifier.width(8.dp))
Text(
- text = "${selectedUris.size} / 10 selected",
- style = MaterialTheme.typography.labelLarge,
- color = if (selectedUris.size == 10) MaterialTheme.colorScheme.error
- else if (selectedUris.isNotEmpty()) MaterialTheme.colorScheme.primary
- else MaterialTheme.colorScheme.outline
+ "Continue with ${selectedImages.size} photos",
+ style = MaterialTheme.typography.titleMedium
)
}
}
- // 3. Conditional rendering for empty state
- if (selectedUris.isEmpty()) {
- Box(Modifier
- .weight(1f)
- .fillMaxWidth(), contentAlignment = Alignment.Center) {
- Text("No images selected", style = MaterialTheme.typography.bodyMedium)
- }
- } else {
- LazyVerticalGrid(
- columns = GridCells.Fixed(3),
- modifier = Modifier.weight(1f),
- contentPadding = PaddingValues(4.dp)
+ // Minimum warning
+ if (selectedImages.isNotEmpty() && selectedImages.size < 15) {
+ Card(
+ modifier = Modifier.fillMaxWidth(),
+ colors = CardDefaults.cardColors(
+ containerColor = MaterialTheme.colorScheme.errorContainer
+ )
) {
- items(selectedUris, key = { it.toString() }) { uri ->
- Box(modifier = Modifier.padding(4.dp)) {
- AsyncImage(
- model = uri,
- contentDescription = null,
- modifier = Modifier
- .aspectRatio(1f)
- .clip(RoundedCornerShape(8.dp)),
- contentScale = ContentScale.Crop
+ Row(
+ modifier = Modifier.padding(16.dp),
+ horizontalArrangement = Arrangement.spacedBy(12.dp),
+ verticalAlignment = Alignment.CenterVertically
+ ) {
+ Icon(
+ Icons.Default.Warning,
+ contentDescription = null,
+ tint = MaterialTheme.colorScheme.error
+ )
+ Column {
+ Text(
+ "Need at least 15 photos",
+ style = MaterialTheme.typography.titleSmall,
+ fontWeight = FontWeight.Bold,
+ color = MaterialTheme.colorScheme.onErrorContainer
+ )
+ Text(
+ "You have ${selectedImages.size}. Select ${15 - selectedImages.size} more.",
+ style = MaterialTheme.typography.bodySmall,
+ color = MaterialTheme.colorScheme.onErrorContainer.copy(alpha = 0.8f)
)
- // 4. Ability to remove specific images
- Surface(
- onClick = { selectedUris = selectedUris - uri },
- modifier = Modifier
- .align(Alignment.TopEnd)
- .padding(4.dp),
- shape = CircleShape,
- color = MaterialTheme.colorScheme.surfaceVariant.copy(alpha = 0.8f)
- ) {
- Icon(
- Icons.Default.Close,
- contentDescription = "Remove",
- modifier = Modifier.size(16.dp)
- )
- }
}
}
}
}
-
- Button(
- modifier = Modifier.fillMaxWidth(),
- enabled = selectedUris.isNotEmpty(),
- onClick = { onImagesSelected(selectedUris) }
- ) {
- Text("Start Face Detection")
- }
}
}
}
+
+@Composable
+private fun TipItem(text: String, isGood: Boolean) {
+ Row(
+ horizontalArrangement = Arrangement.spacedBy(8.dp),
+ verticalAlignment = Alignment.Top
+ ) {
+ Icon(
+ if (isGood) Icons.Default.CheckCircle else Icons.Default.Cancel,
+ contentDescription = null,
+ modifier = Modifier.size(18.dp),
+ tint = if (isGood) {
+ MaterialTheme.colorScheme.primary
+ } else {
+ MaterialTheme.colorScheme.error
+ }
+ )
+ Text(
+ text = text,
+ style = MaterialTheme.typography.bodyMedium,
+ color = MaterialTheme.colorScheme.onPrimaryContainer
+ )
+ }
+}
+
+@Composable
+private fun ProgressCard(photoCount: Int) {
+ Card(
+ modifier = Modifier.fillMaxWidth(),
+ colors = CardDefaults.cardColors(
+ containerColor = when {
+ photoCount >= 25 -> MaterialTheme.colorScheme.primaryContainer
+ photoCount >= 20 -> MaterialTheme.colorScheme.tertiaryContainer
+ else -> MaterialTheme.colorScheme.surfaceVariant
+ }
+ ),
+ shape = RoundedCornerShape(16.dp)
+ ) {
+ Column(
+ modifier = Modifier.padding(20.dp),
+ verticalArrangement = Arrangement.spacedBy(12.dp)
+ ) {
+ Row(
+ modifier = Modifier.fillMaxWidth(),
+ horizontalArrangement = Arrangement.SpaceBetween,
+ verticalAlignment = Alignment.CenterVertically
+ ) {
+ Column {
+ Text(
+ text = "$photoCount photos selected",
+ style = MaterialTheme.typography.titleMedium,
+ fontWeight = FontWeight.Bold
+ )
+ Text(
+ text = when {
+ photoCount >= 30 -> "Excellent! Maximum diversity"
+ photoCount >= 25 -> "Great! Very good coverage"
+ photoCount >= 20 -> "Good! Should work well"
+ photoCount >= 15 -> "Acceptable - more is better"
+ else -> "Need ${15 - photoCount} more"
+ },
+ style = MaterialTheme.typography.bodyMedium,
+ color = MaterialTheme.colorScheme.onSurfaceVariant
+ )
+ }
+
+ Surface(
+ shape = RoundedCornerShape(12.dp),
+ color = when {
+ photoCount >= 25 -> MaterialTheme.colorScheme.primary
+ photoCount >= 20 -> MaterialTheme.colorScheme.tertiary
+ photoCount >= 15 -> MaterialTheme.colorScheme.secondary
+ else -> MaterialTheme.colorScheme.outline
+ },
+ modifier = Modifier.size(56.dp)
+ ) {
+ Box(contentAlignment = Alignment.Center) {
+ Text(
+ text = when {
+                            photoCount >= 25 -> "⭐"
+                            photoCount >= 20 -> "✓"
+                            photoCount >= 15 -> "✓"
+ else -> "..."
+ },
+ style = MaterialTheme.typography.headlineMedium,
+ color = Color.White
+ )
+ }
+ }
+ }
+
+ // Progress bar
+ LinearProgressIndicator(
+ progress = { (photoCount / 30f).coerceAtMost(1f) },
+ modifier = Modifier
+ .fillMaxWidth()
+ .height(8.dp),
+ color = when {
+ photoCount >= 25 -> MaterialTheme.colorScheme.primary
+ photoCount >= 20 -> MaterialTheme.colorScheme.tertiary
+ else -> MaterialTheme.colorScheme.secondary
+ },
+ trackColor = MaterialTheme.colorScheme.surfaceVariant,
+ )
+
+ // Expected accuracy
+ Row(
+ modifier = Modifier.fillMaxWidth(),
+ horizontalArrangement = Arrangement.SpaceBetween
+ ) {
+ Text(
+ "Expected accuracy:",
+ style = MaterialTheme.typography.labelMedium,
+ color = MaterialTheme.colorScheme.onSurfaceVariant
+ )
+ Text(
+ when {
+ photoCount >= 30 -> "90-95%"
+ photoCount >= 25 -> "85-90%"
+ photoCount >= 20 -> "80-85%"
+ photoCount >= 15 -> "75-80%"
+ else -> "< 75%"
+ },
+ style = MaterialTheme.typography.labelLarge,
+ fontWeight = FontWeight.Bold,
+ color = when {
+ photoCount >= 25 -> MaterialTheme.colorScheme.primary
+ photoCount >= 20 -> MaterialTheme.colorScheme.tertiary
+ else -> MaterialTheme.colorScheme.secondary
+ }
+ )
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/app/src/main/java/com/placeholder/sherpai2/ui/trainingprep/TrainViewModel.kt b/app/src/main/java/com/placeholder/sherpai2/ui/trainingprep/TrainViewModel.kt
index e890485..bacb832 100644
--- a/app/src/main/java/com/placeholder/sherpai2/ui/trainingprep/TrainViewModel.kt
+++ b/app/src/main/java/com/placeholder/sherpai2/ui/trainingprep/TrainViewModel.kt
@@ -5,6 +5,7 @@ import android.graphics.Bitmap
import android.net.Uri
import androidx.lifecycle.AndroidViewModel
import androidx.lifecycle.viewModelScope
+import com.placeholder.sherpai2.data.local.entity.PersonEntity
import com.placeholder.sherpai2.data.repository.FaceRecognitionRepository
import com.placeholder.sherpai2.ml.FaceNetModel
import dagger.hilt.android.lifecycle.HiltViewModel
@@ -14,9 +15,6 @@ import kotlinx.coroutines.flow.asStateFlow
import kotlinx.coroutines.launch
import javax.inject.Inject
-/**
- * State for image scanning and validation
- */
sealed class ScanningState {
object Idle : ScanningState()
data class Processing(val progress: Int, val total: Int) : ScanningState()
@@ -26,26 +24,26 @@ sealed class ScanningState {
data class Error(val message: String) : ScanningState()
}
-/**
- * State for face model training/creation
- */
sealed class TrainingState {
object Idle : TrainingState()
data class Processing(val stage: String, val progress: Int, val total: Int) : TrainingState()
- data class Success(val personName: String, val personId: String) : TrainingState()
+ data class Success(
+ val personName: String,
+ val personId: String,
+ val relationship: String?
+ ) : TrainingState()
data class Error(val message: String) : TrainingState()
}
/**
- * ViewModel for training face recognition models
- *
- * WORKFLOW:
- * 1. User selects 10+ images โ scanAndTagFaces()
- * 2. Images validated โ Success state with validImagesWithFaces
- * 3. User can replace images or pick faces from group photos
- * 4. When ready โ createFaceModel(personName)
- * 5. Creates PersonEntity + FaceModelEntity in database
+ * Person info captured before photo selection
*/
+data class PersonInfo(
+ val name: String,
+ val dateOfBirth: Long?,
+ val relationship: String
+)
+
@HiltViewModel
class TrainViewModel @Inject constructor(
application: Application,
@@ -56,18 +54,16 @@ class TrainViewModel @Inject constructor(
private val sanityChecker = TrainingSanityChecker(application)
private val faceDetectionHelper = FaceDetectionHelper(application)
- // Scanning/validation state
    private val _uiState = MutableStateFlow<ScanningState>(ScanningState.Idle)
    val uiState: StateFlow<ScanningState> = _uiState.asStateFlow()
- // Training/model creation state
    private val _trainingState = MutableStateFlow<TrainingState>(TrainingState.Idle)
    val trainingState: StateFlow<TrainingState> = _trainingState.asStateFlow()
- // Keep track of current images for replacements
-    private var currentImageUris: List<Uri> = emptyList()
+ // Store person info for later use during training
+ private var personInfo: PersonInfo? = null
- // Keep track of manual face selections (imageUri -> selectedFaceIndex)
+    private var currentImageUris: List<Uri> = emptyList()
    private val manualFaceSelections = mutableMapOf<Uri, ManualFaceSelection>()
data class ManualFaceSelection(
@@ -75,28 +71,15 @@ class TrainViewModel @Inject constructor(
val croppedFaceBitmap: Bitmap
)
- // ======================
- // FACE MODEL CREATION
- // ======================
+ /**
+ * Store person info before photo selection
+ */
+ fun setPersonInfo(name: String, dateOfBirth: Long?, relationship: String) {
+ personInfo = PersonInfo(name, dateOfBirth, relationship)
+ }
/**
- * Create face model from validated training images.
- *
- * COMPLETE PROCESS:
- * 1. Verify we have 10+ validated images
- * 2. Call repository to create PersonEntity + FaceModelEntity
- * 3. Repository handles: embedding generation, averaging, database save
- *
- * Call this when user clicks "Continue to Training" after validation passes.
- *
- * @param personName Name for the new person
- *
- * EXAMPLE USAGE IN UI:
- * if (result.isValid) {
- * showNameDialog { name ->
- * trainViewModel.createFaceModel(name)
- * }
- * }
+ * Create face model with captured person info
*/
fun createFaceModel(personName: String) {
val currentState = _uiState.value
@@ -106,8 +89,10 @@ class TrainViewModel @Inject constructor(
}
val validImages = currentState.sanityCheckResult.validImagesWithFaces
- if (validImages.size < 10) {
- _trainingState.value = TrainingState.Error("Need at least 10 valid images, have ${validImages.size}")
+ if (validImages.size < 15) { // Updated minimum
+ _trainingState.value = TrainingState.Error(
+ "Need at least 15 valid images, have ${validImages.size}"
+ )
return
}
@@ -119,13 +104,16 @@ class TrainViewModel @Inject constructor(
total = validImages.size
)
- // Repository handles everything:
- // - Creates PersonEntity in 'persons' table
- // - Generates embeddings from face bitmaps
- // - Averages embeddings
- // - Creates FaceModelEntity linked to PersonEntity
+ // Create person with captured info
+ val person = PersonEntity.create(
+ name = personName,
+ dateOfBirth = personInfo?.dateOfBirth,
+ relationship = personInfo?.relationship
+ )
+
+ // Create person with face model
val personId = faceRecognitionRepository.createPersonWithFaceModel(
- personName = personName,
+ person = person, // Pass full PersonEntity now
validImages = validImages,
onProgress = { current, total ->
_trainingState.value = TrainingState.Processing(
@@ -138,7 +126,8 @@ class TrainViewModel @Inject constructor(
_trainingState.value = TrainingState.Success(
personName = personName,
- personId = personId
+ personId = personId,
+ relationship = person.relationship
)
} catch (e: Exception) {
@@ -149,40 +138,16 @@ class TrainViewModel @Inject constructor(
}
}
- /**
- * Reset training state back to idle.
- * Call this after handling success/error.
- */
fun resetTrainingState() {
_trainingState.value = TrainingState.Idle
}
- // ======================
- // IMAGE VALIDATION
- // ======================
-
- /**
- * Scan and validate images for training.
- *
- * PROCESS:
- * 1. Face detection on all images
- * 2. Duplicate checking
- * 3. Validation against requirements (10+ images, one face per image)
- *
- * @param imageUris List of image URIs selected by user
- */
    fun scanAndTagFaces(imageUris: List<Uri>) {
currentImageUris = imageUris
manualFaceSelections.clear()
performScan(imageUris)
}
- /**
- * Replace a single image and re-scan all images.
- *
- * @param oldUri Image to replace
- * @param newUri New image
- */
fun replaceImage(oldUri: Uri, newUri: Uri) {
viewModelScope.launch {
val updatedUris = currentImageUris.toMutableList()
@@ -191,27 +156,15 @@ class TrainViewModel @Inject constructor(
if (index != -1) {
updatedUris[index] = newUri
currentImageUris = updatedUris
-
- // Remove manual selection for old URI if any
manualFaceSelections.remove(oldUri)
-
- // Re-scan all images
performScan(currentImageUris)
}
}
}
- /**
- * User manually selected a face from a multi-face image.
- *
- * @param imageUri Image with multiple faces
- * @param faceIndex Which face the user selected (0-based)
- * @param croppedFaceBitmap Cropped face bitmap
- */
fun selectFaceFromImage(imageUri: Uri, faceIndex: Int, croppedFaceBitmap: Bitmap) {
manualFaceSelections[imageUri] = ManualFaceSelection(faceIndex, croppedFaceBitmap)
- // Re-process the results with the manual selection
val currentState = _uiState.value
if (currentState is ScanningState.Success) {
val updatedResult = applyManualSelections(currentState.sanityCheckResult)
@@ -219,25 +172,19 @@ class TrainViewModel @Inject constructor(
}
}
- /**
- * Perform the actual scanning.
- */
    private fun performScan(imageUris: List<Uri>) {
viewModelScope.launch {
try {
_uiState.value = ScanningState.Processing(0, imageUris.size)
- // Perform sanity checks
val result = sanityChecker.performSanityChecks(
imageUris = imageUris,
- minImagesRequired = 10,
- allowMultipleFaces = true, // Allow multiple faces - user can pick
+ minImagesRequired = 15, // Updated minimum
+ allowMultipleFaces = true,
duplicateSimilarityThreshold = 0.95
)
- // Apply any manual face selections
val finalResult = applyManualSelections(result)
-
_uiState.value = ScanningState.Success(finalResult)
} catch (e: Exception) {
@@ -248,26 +195,19 @@ class TrainViewModel @Inject constructor(
}
}
- /**
- * Apply manual face selections to the results.
- */
private fun applyManualSelections(
result: TrainingSanityChecker.SanityCheckResult
): TrainingSanityChecker.SanityCheckResult {
- // If no manual selections, return original
if (manualFaceSelections.isEmpty()) {
return result
}
- // Update face detection results with manual selections
val updatedFaceResults = result.faceDetectionResults.map { faceResult ->
val manualSelection = manualFaceSelections[faceResult.uri]
if (manualSelection != null) {
- // Replace the cropped face with the manually selected one
faceResult.copy(
croppedFaceBitmap = manualSelection.croppedFaceBitmap,
- // Treat as single face since user selected one
faceCount = 1
)
} else {
@@ -275,12 +215,11 @@ class TrainViewModel @Inject constructor(
}
}
- // Update valid images list
val updatedValidImages = updatedFaceResults
.filter { it.hasFace }
.filter { it.croppedFaceBitmap != null }
.filter { it.errorMessage == null }
- .filter { it.faceCount >= 1 } // Now accept if user picked a face
+ .filter { it.faceCount >= 1 }
.map { result ->
TrainingSanityChecker.ValidTrainingImage(
uri = result.uri,
@@ -289,31 +228,27 @@ class TrainViewModel @Inject constructor(
)
}
- // Recalculate validation errors
val updatedErrors = result.validationErrors.toMutableList()
- // Remove multiple face errors for images with manual selections
updatedErrors.removeAll { error ->
error is TrainingSanityChecker.ValidationError.MultipleFacesDetected &&
manualFaceSelections.containsKey(error.uri)
}
- // Check if we have enough valid images now
- if (updatedValidImages.size < 10) {
+ if (updatedValidImages.size < 15) { // Updated minimum
if (updatedErrors.none { it is TrainingSanityChecker.ValidationError.InsufficientImages }) {
updatedErrors.add(
TrainingSanityChecker.ValidationError.InsufficientImages(
- required = 10,
+ required = 15,
available = updatedValidImages.size
)
)
}
} else {
- // Remove insufficient images error if we now have enough
updatedErrors.removeAll { it is TrainingSanityChecker.ValidationError.InsufficientImages }
}
- val isValid = updatedErrors.isEmpty() && updatedValidImages.size >= 10
+ val isValid = updatedErrors.isEmpty() && updatedValidImages.size >= 15
return result.copy(
isValid = isValid,
@@ -323,21 +258,16 @@ class TrainViewModel @Inject constructor(
)
}
- /**
- * Get formatted error messages.
- */
    fun getFormattedErrors(result: TrainingSanityChecker.SanityCheckResult): List<String> {
return sanityChecker.formatValidationErrors(result.validationErrors)
}
- /**
- * Reset to idle state.
- */
fun reset() {
_uiState.value = ScanningState.Idle
_trainingState.value = TrainingState.Idle
currentImageUris = emptyList()
manualFaceSelections.clear()
+ personInfo = null
}
override fun onCleared() {
@@ -348,13 +278,7 @@ class TrainViewModel @Inject constructor(
}
}
-// ======================
-// EXTENSION FUNCTIONS
-// ======================
-
-/**
- * Extension to copy FaceDetectionResult with modifications.
- */
+// Extension functions for copying results
private fun FaceDetectionHelper.FaceDetectionResult.copy(
uri: Uri = this.uri,
hasFace: Boolean = this.hasFace,
@@ -373,9 +297,6 @@ private fun FaceDetectionHelper.FaceDetectionResult.copy(
)
}
-/**
- * Extension to copy SanityCheckResult with modifications.
- */
private fun TrainingSanityChecker.SanityCheckResult.copy(
isValid: Boolean = this.isValid,
    faceDetectionResults: List<FaceDetectionHelper.FaceDetectionResult> = this.faceDetectionResults,
diff --git a/app/src/main/java/com/placeholder/sherpai2/ui/trainingprep/TrainingScreen.kt b/app/src/main/java/com/placeholder/sherpai2/ui/trainingprep/TrainingScreen.kt
index 9088a0e..5f9f1bc 100644
--- a/app/src/main/java/com/placeholder/sherpai2/ui/trainingprep/TrainingScreen.kt
+++ b/app/src/main/java/com/placeholder/sherpai2/ui/trainingprep/TrainingScreen.kt
@@ -1,31 +1,516 @@
package com.placeholder.sherpai2.ui.trainingprep
-import androidx.compose.foundation.layout.padding
-import androidx.compose.material3.Button
-import androidx.compose.material3.ExperimentalMaterial3Api
-import androidx.compose.material3.Scaffold
-import androidx.compose.material3.Text
-import androidx.compose.material3.TopAppBar
-import androidx.compose.runtime.Composable
+import androidx.compose.animation.AnimatedVisibility
+import androidx.compose.foundation.background
+import androidx.compose.foundation.layout.*
+import androidx.compose.foundation.rememberScrollState
+import androidx.compose.foundation.shape.RoundedCornerShape
+import androidx.compose.foundation.verticalScroll
+import androidx.compose.material.icons.Icons
+import androidx.compose.material.icons.filled.*
+import androidx.compose.material3.*
+import androidx.compose.runtime.*
+import androidx.compose.ui.Alignment
import androidx.compose.ui.Modifier
-import androidx.hilt.lifecycle.viewmodel.compose.hiltViewModel
+import androidx.compose.ui.graphics.Brush
+import androidx.compose.ui.text.font.FontWeight
+import androidx.compose.ui.text.style.TextAlign
+import androidx.compose.ui.unit.dp
+import java.text.SimpleDateFormat
+import java.util.*
+
+/**
+ * Beautiful TrainingScreen with person info capture
+ *
+ * Features:
+ * - Name input
+ * - Date of birth picker
+ * - Relationship selector
+ * - Onboarding cards
+ * - Beautiful gradient design
+ * - Clear call to action
+ */
@OptIn(ExperimentalMaterial3Api::class)
@Composable
fun TrainingScreen(
- onSelectImages: () -> Unit
+ onSelectImages: () -> Unit,
+ modifier: Modifier = Modifier
) {
+ var showInfoDialog by remember { mutableStateOf(false) }
+
Scaffold(
topBar = {
TopAppBar(
- title = { Text("Training") }
+ title = { Text("Train New Person") },
+ colors = TopAppBarDefaults.topAppBarColors(
+ containerColor = MaterialTheme.colorScheme.primaryContainer
+ )
)
}
- ) { padding ->
- Button(
- modifier = Modifier.padding(padding),
- onClick = onSelectImages
+ ) { paddingValues ->
+ Column(
+ modifier = modifier
+ .fillMaxSize()
+ .padding(paddingValues)
+ .verticalScroll(rememberScrollState())
+ .padding(20.dp),
+ verticalArrangement = Arrangement.spacedBy(20.dp)
) {
- Text("Select Images")
+
+ // Hero section with gradient
+ HeroCard()
+
+ // How it works section
+ HowItWorksSection()
+
+ // Requirements section
+ RequirementsCard()
+
+ Spacer(Modifier.weight(1f))
+
+ // Main CTA button
+ Button(
+ onClick = { showInfoDialog = true },
+ modifier = Modifier
+ .fillMaxWidth()
+ .height(60.dp),
+ colors = ButtonDefaults.buttonColors(
+ containerColor = MaterialTheme.colorScheme.primary
+ ),
+ shape = RoundedCornerShape(16.dp)
+ ) {
+ Icon(
+ Icons.Default.PersonAdd,
+ contentDescription = null,
+ modifier = Modifier.size(24.dp)
+ )
+ Spacer(Modifier.width(12.dp))
+ Text(
+ "Start Training",
+ style = MaterialTheme.typography.titleLarge,
+ fontWeight = FontWeight.Bold
+ )
+ }
+
+ Spacer(Modifier.height(8.dp))
+ }
+ }
+
+ // Person info dialog
+ if (showInfoDialog) {
+ PersonInfoDialog(
+ onDismiss = { showInfoDialog = false },
+ onConfirm = { name, dob, relationship ->
+ showInfoDialog = false
+ // TODO: Store this info before photo selection
+ // For now, just proceed to photo selection
+ onSelectImages()
+ }
+ )
+ }
+}
+
+@Composable
+private fun HeroCard() {
+ Card(
+ modifier = Modifier.fillMaxWidth(),
+ colors = CardDefaults.cardColors(
+ containerColor = MaterialTheme.colorScheme.primaryContainer
+ ),
+ shape = RoundedCornerShape(20.dp)
+ ) {
+ Box(
+ modifier = Modifier
+ .fillMaxWidth()
+ .background(
+ Brush.verticalGradient(
+ colors = listOf(
+ MaterialTheme.colorScheme.primaryContainer,
+ MaterialTheme.colorScheme.primaryContainer.copy(alpha = 0.7f)
+ )
+ )
+ )
+ ) {
+ Column(
+ modifier = Modifier.padding(24.dp),
+ horizontalAlignment = Alignment.CenterHorizontally,
+ verticalArrangement = Arrangement.spacedBy(16.dp)
+ ) {
+ Surface(
+ shape = RoundedCornerShape(20.dp),
+ color = MaterialTheme.colorScheme.primary,
+ shadowElevation = 8.dp,
+ modifier = Modifier.size(80.dp)
+ ) {
+ Box(contentAlignment = Alignment.Center) {
+ Icon(
+ Icons.Default.Face,
+ contentDescription = null,
+ modifier = Modifier.size(48.dp),
+ tint = MaterialTheme.colorScheme.onPrimary
+ )
+ }
+ }
+
+ Text(
+ "Face Recognition Training",
+ style = MaterialTheme.typography.headlineMedium,
+ fontWeight = FontWeight.Bold,
+ textAlign = TextAlign.Center
+ )
+
+ Text(
+ "Train the AI to recognize someone in your photos",
+ style = MaterialTheme.typography.bodyLarge,
+ textAlign = TextAlign.Center,
+ color = MaterialTheme.colorScheme.onPrimaryContainer.copy(alpha = 0.8f)
+ )
+ }
}
}
}
+
+@Composable
+private fun HowItWorksSection() {
+ Column(verticalArrangement = Arrangement.spacedBy(12.dp)) {
+ Text(
+ "How It Works",
+ style = MaterialTheme.typography.titleLarge,
+ fontWeight = FontWeight.Bold
+ )
+
+ StepCard(
+ number = 1,
+ icon = Icons.Default.Info,
+ title = "Enter Person Details",
+ description = "Name, birthday, and relationship"
+ )
+
+ StepCard(
+ number = 2,
+ icon = Icons.Default.PhotoLibrary,
+ title = "Select Training Photos",
+ description = "Choose 20-30 photos of the person"
+ )
+
+ StepCard(
+ number = 3,
+ icon = Icons.Default.ModelTraining,
+ title = "AI Learns Their Face",
+ description = "Takes ~30 seconds to train"
+ )
+
+ StepCard(
+ number = 4,
+ icon = Icons.Default.Search,
+ title = "Auto-Tag Your Library",
+ description = "Find them in all your photos"
+ )
+ }
+}
+
+@Composable
+private fun StepCard(
+ number: Int,
+ icon: androidx.compose.ui.graphics.vector.ImageVector,
+ title: String,
+ description: String
+) {
+ Card(
+ modifier = Modifier.fillMaxWidth(),
+ colors = CardDefaults.cardColors(
+ containerColor = MaterialTheme.colorScheme.surfaceVariant
+ ),
+ shape = RoundedCornerShape(12.dp)
+ ) {
+ Row(
+ modifier = Modifier.padding(16.dp),
+ horizontalArrangement = Arrangement.spacedBy(16.dp),
+ verticalAlignment = Alignment.CenterVertically
+ ) {
+ // Number badge
+ Surface(
+ shape = RoundedCornerShape(12.dp),
+ color = MaterialTheme.colorScheme.primary,
+ modifier = Modifier.size(48.dp)
+ ) {
+ Box(contentAlignment = Alignment.Center) {
+ Text(
+ text = number.toString(),
+ style = MaterialTheme.typography.titleLarge,
+ fontWeight = FontWeight.Bold,
+ color = MaterialTheme.colorScheme.onPrimary
+ )
+ }
+ }
+
+ Column(modifier = Modifier.weight(1f)) {
+ Row(
+ horizontalArrangement = Arrangement.spacedBy(8.dp),
+ verticalAlignment = Alignment.CenterVertically
+ ) {
+ Icon(
+ icon,
+ contentDescription = null,
+ modifier = Modifier.size(20.dp),
+ tint = MaterialTheme.colorScheme.primary
+ )
+ Text(
+ title,
+ style = MaterialTheme.typography.titleMedium,
+ fontWeight = FontWeight.SemiBold
+ )
+ }
+ Spacer(Modifier.height(4.dp))
+ Text(
+ description,
+ style = MaterialTheme.typography.bodyMedium,
+ color = MaterialTheme.colorScheme.onSurfaceVariant
+ )
+ }
+ }
+ }
+}
+
+@Composable
+private fun RequirementsCard() {
+ Card(
+ modifier = Modifier.fillMaxWidth(),
+ colors = CardDefaults.cardColors(
+ containerColor = MaterialTheme.colorScheme.secondaryContainer
+ ),
+ shape = RoundedCornerShape(16.dp)
+ ) {
+ Column(
+ modifier = Modifier.padding(20.dp),
+ verticalArrangement = Arrangement.spacedBy(12.dp)
+ ) {
+ Row(
+ horizontalArrangement = Arrangement.spacedBy(8.dp),
+ verticalAlignment = Alignment.CenterVertically
+ ) {
+ Icon(
+ Icons.Default.CheckCircle,
+ contentDescription = null,
+ tint = MaterialTheme.colorScheme.primary
+ )
+ Text(
+ "What You'll Need",
+ style = MaterialTheme.typography.titleMedium,
+ fontWeight = FontWeight.Bold
+ )
+ }
+
+ RequirementItem("20-30 photos of the person", true)
+ RequirementItem("Different angles and lighting", true)
+ RequirementItem("Clear face visibility", true)
+ RequirementItem("Mix of expressions", true)
+ RequirementItem("2-3 minutes of your time", true)
+ }
+ }
+}
+
+@Composable
+private fun RequirementItem(text: String, isMet: Boolean) {
+ Row(
+ horizontalArrangement = Arrangement.spacedBy(8.dp),
+ verticalAlignment = Alignment.CenterVertically
+ ) {
+ Icon(
+ if (isMet) Icons.Default.Check else Icons.Default.Close,
+ contentDescription = null,
+ modifier = Modifier.size(18.dp),
+ tint = if (isMet) {
+ MaterialTheme.colorScheme.primary
+ } else {
+ MaterialTheme.colorScheme.error
+ }
+ )
+ Text(
+ text = text,
+ style = MaterialTheme.typography.bodyMedium
+ )
+ }
+}
+
+@OptIn(ExperimentalMaterial3Api::class)
+@Composable
+private fun PersonInfoDialog(
+ onDismiss: () -> Unit,
+ onConfirm: (name: String, dateOfBirth: Long?, relationship: String) -> Unit
+) {
+ var name by remember { mutableStateOf("") }
+    var dateOfBirth by remember { mutableStateOf<Long?>(null) }
+ var selectedRelationship by remember { mutableStateOf("Other") }
+ var showDatePicker by remember { mutableStateOf(false) }
+
+ val relationships = listOf(
+        "Family" to "👨‍👩‍👧‍👦",
+        "Friend" to "🤝",
+        "Partner" to "❤️",
+        "Child" to "👶",
+        "Parent" to "👪",
+        "Sibling" to "👫",
+        "Colleague" to "💼",
+        "Other" to "🤔"
+ )
+
+ AlertDialog(
+ onDismissRequest = onDismiss,
+ title = {
+ Column {
+ Text("Person Details")
+ Text(
+ "Help us organize your photos",
+ style = MaterialTheme.typography.bodySmall,
+ color = MaterialTheme.colorScheme.onSurfaceVariant
+ )
+ }
+ },
+ text = {
+ Column(
+ modifier = Modifier.fillMaxWidth(),
+ verticalArrangement = Arrangement.spacedBy(16.dp)
+ ) {
+ // Name field
+ OutlinedTextField(
+ value = name,
+ onValueChange = { name = it },
+ label = { Text("Name *") },
+ placeholder = { Text("e.g., John Doe") },
+ leadingIcon = {
+ Icon(Icons.Default.Person, contentDescription = null)
+ },
+ modifier = Modifier.fillMaxWidth(),
+ singleLine = true
+ )
+
+ // Date of birth
+ OutlinedButton(
+ onClick = { showDatePicker = true },
+ modifier = Modifier.fillMaxWidth()
+ ) {
+ Icon(Icons.Default.Cake, contentDescription = null)
+ Spacer(Modifier.width(8.dp))
+ Text(
+ if (dateOfBirth != null) {
+ "Birthday: ${formatDate(dateOfBirth!!)}"
+ } else {
+ "Add Birthday (Optional)"
+ }
+ )
+ }
+
+ // Relationship selector
+ Column(verticalArrangement = Arrangement.spacedBy(8.dp)) {
+ Text(
+ "Relationship",
+ style = MaterialTheme.typography.labelMedium
+ )
+
+ // Relationship chips
+ Row(
+ modifier = Modifier.fillMaxWidth(),
+ horizontalArrangement = Arrangement.spacedBy(8.dp)
+ ) {
+ relationships.take(4).forEach { (rel, emoji) ->
+ FilterChip(
+ selected = selectedRelationship == rel,
+ onClick = { selectedRelationship = rel },
+ label = { Text("$emoji $rel") }
+ )
+ }
+ }
+ Row(
+ modifier = Modifier.fillMaxWidth(),
+ horizontalArrangement = Arrangement.spacedBy(8.dp)
+ ) {
+ relationships.drop(4).forEach { (rel, emoji) ->
+ FilterChip(
+ selected = selectedRelationship == rel,
+ onClick = { selectedRelationship = rel },
+ label = { Text("$emoji $rel") }
+ )
+ }
+ }
+ }
+
+ // Privacy note
+ Card(
+ colors = CardDefaults.cardColors(
+ containerColor = MaterialTheme.colorScheme.surfaceVariant
+ )
+ ) {
+ Row(
+ modifier = Modifier.padding(12.dp),
+ horizontalArrangement = Arrangement.spacedBy(8.dp)
+ ) {
+ Icon(
+ Icons.Default.Lock,
+ contentDescription = null,
+ modifier = Modifier.size(16.dp),
+ tint = MaterialTheme.colorScheme.primary
+ )
+ Text(
+ "All data stays on your device",
+ style = MaterialTheme.typography.bodySmall,
+ color = MaterialTheme.colorScheme.onSurfaceVariant
+ )
+ }
+ }
+ }
+ },
+ confirmButton = {
+ Button(
+ onClick = {
+ if (name.isNotBlank()) {
+ onConfirm(name, dateOfBirth, selectedRelationship)
+ }
+ },
+ enabled = name.isNotBlank()
+ ) {
+ Text("Continue")
+ }
+ },
+ dismissButton = {
+ TextButton(onClick = onDismiss) {
+ Text("Cancel")
+ }
+ }
+ )
+
+ // Date picker dialog
+ if (showDatePicker) {
+ DatePickerDialog(
+ onDismissRequest = { showDatePicker = false },
+ confirmButton = {
+ TextButton(
+ onClick = {
+ // Get selected date from date picker
+ // For now, set to current date as placeholder
+ dateOfBirth = System.currentTimeMillis()
+ showDatePicker = false
+ }
+ ) {
+ Text("OK")
+ }
+ },
+ dismissButton = {
+ TextButton(onClick = { showDatePicker = false }) {
+ Text("Cancel")
+ }
+ }
+ ) {
+ // Material3 DatePicker
+ DatePicker(
+ state = rememberDatePickerState(),
+ modifier = Modifier.padding(16.dp)
+ )
+ }
+ }
+}
+
+private fun formatDate(timestamp: Long): String {
+ val formatter = SimpleDateFormat("MMM dd, yyyy", Locale.getDefault())
+ return formatter.format(Date(timestamp))
+}
\ No newline at end of file
diff --git a/app/src/main/java/com/placeholder/sherpai2/util/DebugFlags.kt b/app/src/main/java/com/placeholder/sherpai2/util/DebugFlags.kt
new file mode 100644
index 0000000..6d64849
--- /dev/null
+++ b/app/src/main/java/com/placeholder/sherpai2/util/DebugFlags.kt
@@ -0,0 +1,68 @@
+package com.placeholder.sherpai2.util
+
+/**
+ * Debug feature flags
+ *
+ * Toggle these to enable/disable diagnostic features
+ * Set to false before release builds!
+ */
+object DebugFlags {
+
+ /**
+ * Enable verbose face recognition logging
+ *
+ * When true:
+ * - Logs every face detection
+ * - Logs similarity scores
+ * - Logs matching decisions
+ * - Shows why images are skipped
+ *
+ * Filter Logcat by: "FaceRecognition"
+ */
+ const val ENABLE_FACE_RECOGNITION_LOGGING = true // โ Toggle here
+
+ /**
+ * Show confidence scores in UI
+ */
+ const val SHOW_CONFIDENCE_IN_UI = true // โ Toggle here
+
+ /**
+ * Lower thresholds for better recall (more matches, some false positives)
+ */
+ const val USE_LIBERAL_THRESHOLDS = true // โ Toggle here
+}
+
+/**
+ * Diagnostic logger - only logs when flag is enabled
+ */
+object DiagnosticLogger {
+ private const val TAG = "FaceRecognition"
+
+ fun d(message: String) {
+ if (DebugFlags.ENABLE_FACE_RECOGNITION_LOGGING) {
+ android.util.Log.d(TAG, message)
+ }
+ }
+
+ fun i(message: String) {
+ if (DebugFlags.ENABLE_FACE_RECOGNITION_LOGGING) {
+ android.util.Log.i(TAG, message)
+ }
+ }
+
+ fun w(message: String) {
+ if (DebugFlags.ENABLE_FACE_RECOGNITION_LOGGING) {
+ android.util.Log.w(TAG, message)
+ }
+ }
+
+ fun e(message: String, throwable: Throwable? = null) {
+ if (DebugFlags.ENABLE_FACE_RECOGNITION_LOGGING) {
+ if (throwable != null) {
+ android.util.Log.e(TAG, message, throwable)
+ } else {
+ android.util.Log.e(TAG, message)
+ }
+ }
+ }
+}
\ No newline at end of file