1 Commits

Author SHA1 Message Date
genki
2b5f761d25 Oh yes - That's how we do
No default params for KSP complainer fuck

UI sweeps
2026-01-10 00:08:04 -05:00
17 changed files with 201 additions and 1592 deletions

View File

@@ -8,33 +8,20 @@ import androidx.activity.ComponentActivity
import androidx.activity.compose.rememberLauncherForActivityResult
import androidx.activity.compose.setContent
import androidx.activity.result.contract.ActivityResultContracts
import androidx.compose.foundation.layout.*
import androidx.compose.material3.*
import androidx.compose.foundation.layout.Box
import androidx.compose.foundation.layout.fillMaxSize
import androidx.compose.material3.CircularProgressIndicator
import androidx.compose.material3.Text
import androidx.compose.runtime.*
import androidx.compose.ui.Alignment
import androidx.compose.ui.Modifier
import androidx.compose.ui.text.font.FontWeight
import androidx.compose.ui.unit.dp
import androidx.core.content.ContextCompat
import androidx.lifecycle.lifecycleScope
import com.placeholder.sherpai2.domain.repository.ImageRepository
import com.placeholder.sherpai2.ui.presentation.MainScreen
import com.placeholder.sherpai2.ui.theme.SherpAI2Theme
import dagger.hilt.android.AndroidEntryPoint
import kotlinx.coroutines.Dispatchers
import kotlinx.coroutines.launch
import kotlinx.coroutines.withContext
import javax.inject.Inject
/**
* MainActivity - ENHANCED with background ingestion
*
* Key improvements:
* 1. Non-blocking ingestion - app loads immediately
* 2. Background processing with progress updates
* 3. Graceful handling of large photo collections
* 4. User can navigate while ingestion runs
*/
@AndroidEntryPoint
class MainActivity : ComponentActivity() {
@@ -59,7 +46,8 @@ class MainActivity : ComponentActivity() {
)
}
var ingestionState by remember { mutableStateOf<IngestionState>(IngestionState.NotStarted) }
var isIngesting by remember { mutableStateOf(false) }
var imagesIngested by remember { mutableStateOf(false) }
val permissionLauncher = rememberLauncherForActivityResult(
ActivityResultContracts.RequestPermission()
@@ -67,83 +55,35 @@ class MainActivity : ComponentActivity() {
hasPermission = granted
}
// Start background ingestion when permission granted
// Logic: Handle the flow of Permission -> Ingestion
LaunchedEffect(hasPermission) {
if (hasPermission && ingestionState is IngestionState.NotStarted) {
ingestionState = IngestionState.InProgress(0, 0)
// Launch in background - NON-BLOCKING
lifecycleScope.launch(Dispatchers.IO) {
try {
// Check if already ingested
val existingCount = imageRepository.getImageCount()
if (existingCount > 0) {
// Already have images, skip ingestion
withContext(Dispatchers.Main) {
ingestionState = IngestionState.Complete(existingCount)
}
} else {
// Start ingestion with progress tracking
imageRepository.ingestImagesWithProgress { current, total ->
ingestionState = IngestionState.InProgress(current, total)
}
val finalCount = imageRepository.getImageCount()
withContext(Dispatchers.Main) {
ingestionState = IngestionState.Complete(finalCount)
}
}
} catch (e: Exception) {
withContext(Dispatchers.Main) {
ingestionState = IngestionState.Error(e.message ?: "Failed to load images")
}
}
if (hasPermission) {
if (!imagesIngested && !isIngesting) {
isIngesting = true
imageRepository.ingestImages()
imagesIngested = true
isIngesting = false
}
} else if (!hasPermission) {
} else {
permissionLauncher.launch(storagePermission)
}
}
// UI State
// UI State Mapping
Box(
modifier = Modifier.fillMaxSize()
modifier = Modifier.fillMaxSize(),
contentAlignment = Alignment.Center
) {
when {
hasPermission -> {
// ALWAYS show main screen (non-blocking!)
hasPermission && imagesIngested -> {
MainScreen()
// Show progress overlay if still ingesting
if (ingestionState is IngestionState.InProgress) {
IngestionProgressOverlay(
state = ingestionState as IngestionState.InProgress
)
}
}
hasPermission && isIngesting -> {
// Show a loader so you know it's working!
CircularProgressIndicator()
}
else -> {
Box(
modifier = Modifier.fillMaxSize(),
contentAlignment = Alignment.Center
) {
Column(
horizontalAlignment = Alignment.CenterHorizontally,
verticalArrangement = Arrangement.spacedBy(16.dp)
) {
Text(
"Storage Permission Required",
style = MaterialTheme.typography.titleLarge,
fontWeight = FontWeight.Bold
)
Text(
"SherpAI needs access to your photos",
style = MaterialTheme.typography.bodyMedium
)
Button(onClick = { permissionLauncher.launch(storagePermission) }) {
Text("Grant Permission")
}
}
}
Text("Please grant storage permission to continue.")
}
}
}
@@ -151,79 +91,3 @@ class MainActivity : ComponentActivity() {
}
}
}
/**
 * Closed set of states for the photo-ingestion pipeline, with progress data.
 *
 * Modeled as a sealed class so UI code can `when` over it exhaustively
 * without an `else` branch.
 */
sealed class IngestionState {
// Ingestion has not been kicked off yet (e.g. permission not granted).
object NotStarted : IngestionState()
// Actively ingesting: [current] of [total] images processed.
// NOTE(review): total appears to be 0 while the collection is still being
// counted — confirm against the producer before relying on it.
data class InProgress(val current: Int, val total: Int) : IngestionState()
// Finished successfully; [imageCount] images are now in the database.
data class Complete(val imageCount: Int) : IngestionState()
// Ingestion failed; [message] is a human-readable description.
data class Error(val message: String) : IngestionState()
}
/**
 * Non-intrusive progress overlay shown while photos are being ingested.
 *
 * Renders a card pinned to the bottom of a full-size [Box] so the rest of
 * the screen underneath remains visible. Shows a determinate progress bar
 * when `state.total` is known (> 0), otherwise an indeterminate one.
 *
 * @param state the in-progress ingestion state carrying current/total counts.
 */
@Composable
fun IngestionProgressOverlay(state: IngestionState.InProgress) {
Box(
modifier = Modifier.fillMaxSize(),
contentAlignment = Alignment.BottomCenter
) {
Card(
modifier = Modifier
.fillMaxWidth()
.padding(16.dp),
colors = CardDefaults.cardColors(
containerColor = MaterialTheme.colorScheme.primaryContainer
),
elevation = CardDefaults.cardElevation(defaultElevation = 8.dp)
) {
Column(
modifier = Modifier
.fillMaxWidth()
.padding(16.dp),
verticalArrangement = Arrangement.spacedBy(12.dp)
) {
// Header row: title on the left, "current / total" counter on the
// right (counter only once the total is known).
Row(
modifier = Modifier.fillMaxWidth(),
horizontalArrangement = Arrangement.SpaceBetween,
verticalAlignment = Alignment.CenterVertically
) {
Text(
text = "Loading photos...",
style = MaterialTheme.typography.titleMedium,
fontWeight = FontWeight.Bold
)
if (state.total > 0) {
Text(
text = "${state.current} / ${state.total}",
style = MaterialTheme.typography.bodyMedium,
color = MaterialTheme.colorScheme.primary
)
}
}
// Determinate bar when total is known; indeterminate while counting.
if (state.total > 0) {
LinearProgressIndicator(
progress = { state.current.toFloat() / state.total.toFloat() },
modifier = Modifier.fillMaxWidth(),
)
} else {
LinearProgressIndicator(
modifier = Modifier.fillMaxWidth()
)
}
// Reassure the user that the overlay is non-blocking.
Text(
text = "You can start using the app while photos load in the background",
style = MaterialTheme.typography.bodySmall,
color = MaterialTheme.colorScheme.onSurfaceVariant
)
}
}
}
}

View File

@@ -72,19 +72,4 @@ interface ImageDao {
*/
@Query("SELECT * FROM images WHERE imageId IN (:imageIds)")
suspend fun getImagesByIds(imageIds: List<String>): List<ImageEntity>
@Query("SELECT COUNT(*) FROM images")
suspend fun getImageCount(): Int
/**
* Get all images (for utilities processing)
*/
@Query("SELECT * FROM images ORDER BY capturedAt DESC")
suspend fun getAllImages(): List<ImageEntity>
/**
* Get all images sorted by time (for burst detection)
*/
@Query("SELECT * FROM images ORDER BY capturedAt ASC")
suspend fun getAllImagesSortedByTime(): List<ImageEntity>
}

View File

@@ -44,10 +44,4 @@ interface ImageTagDao {
WHERE it.imageId = :imageId AND it.visibility = 'PUBLIC'
""")
fun getTagsForImage(imageId: String): Flow<List<TagEntity>>
/**
* Insert image tag (for utilities tagging)
*/
@Insert(onConflict = OnConflictStrategy.IGNORE)
suspend fun insert(imageTag: ImageTagEntity): Long
}

View File

@@ -23,23 +23,9 @@ interface ImageRepository {
* This function:
* - deduplicates
* - assigns events automatically
* - BLOCKS until complete (old behavior)
*/
suspend fun ingestImages()
/**
* Ingest images with progress callback (NEW!)
*
* @param onProgress Called with (current, total) for progress updates
*/
suspend fun ingestImagesWithProgress(onProgress: (current: Int, total: Int) -> Unit)
/**
* Get total image count (NEW!)
* Fast query to check if images already loaded
*/
suspend fun getImageCount(): Int
fun getAllImages(): Flow<List<ImageWithEverything>>
fun findImagesByTag(tag: String): Flow<List<ImageWithEverything>>

View File

@@ -15,21 +15,11 @@ import dagger.hilt.android.qualifiers.ApplicationContext
import kotlinx.coroutines.Dispatchers
import kotlinx.coroutines.flow.Flow
import kotlinx.coroutines.withContext
import kotlinx.coroutines.yield
import java.security.MessageDigest
import java.util.*
import javax.inject.Inject
import javax.inject.Singleton
/**
* ImageRepositoryImpl - ENHANCED for large photo collections
*
* Key improvements:
* 1. Batched processing (100 images at a time)
* 2. Progress callbacks
* 3. Yields to prevent ANR
* 4. Fast image count check
*/
@Singleton
class ImageRepositoryImpl @Inject constructor(
private val imageDao: ImageDao,
@@ -44,85 +34,38 @@ class ImageRepositoryImpl @Inject constructor(
}
/**
 * Returns the total number of ingested images via a fast `COUNT(*)` DAO
 * query, executed on the IO dispatcher.
 *
 * Used to decide whether ingestion can be skipped entirely.
 */
override suspend fun getImageCount(): Int = withContext(Dispatchers.IO) {
return@withContext imageDao.getImageCount()
}
/**
 * Blocking ingestion entry point, kept for backward compatibility.
 *
 * Delegates to [ingestImagesWithProgress] with a no-op progress callback and
 * suspends (on the IO dispatcher) until ingestion completes.
 */
override suspend fun ingestImages(): Unit = withContext(Dispatchers.IO) {
ingestImagesWithProgress { _, _ -> }
}
/**
* Enhanced ingestion with progress tracking
* Processes in batches to prevent ANR and memory issues
* SCANS ALL FOLDERS RECURSIVELY (including nested directories)
*/
override suspend fun ingestImagesWithProgress(
onProgress: (current: Int, total: Int) -> Unit
): Unit = withContext(Dispatchers.IO) {
try {
val imageList = mutableListOf<ImageEntity>()
val projection = arrayOf(
MediaStore.Images.Media._ID,
MediaStore.Images.Media.DISPLAY_NAME,
MediaStore.Images.Media.DATE_TAKEN,
MediaStore.Images.Media.DATE_ADDED,
MediaStore.Images.Media.WIDTH,
MediaStore.Images.Media.HEIGHT,
MediaStore.Images.Media.DATA // Full file path
MediaStore.Images.Media.HEIGHT
)
val sortOrder = "${MediaStore.Images.Media.DATE_ADDED} ASC"
// IMPORTANT: Don't filter by BUCKET_ID or folder
// This scans ALL images on device including nested folders
val selection = null // No WHERE clause = all images
val selectionArgs = null
// First pass: Count total images
var totalImages = 0
context.contentResolver.query(
MediaStore.Images.Media.EXTERNAL_CONTENT_URI,
arrayOf(MediaStore.Images.Media._ID),
selection,
selectionArgs,
null
)?.use { cursor ->
totalImages = cursor.count
}
if (totalImages == 0) {
Log.i("ImageRepository", "No images found on device")
return@withContext
}
Log.i("ImageRepository", "Found $totalImages images to process (ALL folders)")
onProgress(0, totalImages)
// Second pass: Process in batches
val batchSize = 100
var processed = 0
context.contentResolver.query(
MediaStore.Images.Media.EXTERNAL_CONTENT_URI,
projection,
selection,
selectionArgs,
null,
null,
sortOrder
)?.use { cursor ->
val idCol = cursor.getColumnIndexOrThrow(MediaStore.Images.Media._ID)
val nameCol = cursor.getColumnIndexOrThrow(MediaStore.Images.Media.DISPLAY_NAME)
val dateTakenCol = cursor.getColumnIndexOrThrow(MediaStore.Images.Media.DATE_TAKEN)
val dateAddedCol = cursor.getColumnIndexOrThrow(MediaStore.Images.Media.DATE_ADDED)
val widthCol = cursor.getColumnIndexOrThrow(MediaStore.Images.Media.WIDTH)
val heightCol = cursor.getColumnIndexOrThrow(MediaStore.Images.Media.HEIGHT)
val dataCol = cursor.getColumnIndexOrThrow(MediaStore.Images.Media.DATA)
val batch = mutableListOf<ImageEntity>()
while (cursor.moveToNext()) {
val id = cursor.getLong(idCol)
@@ -131,14 +74,16 @@ class ImageRepositoryImpl @Inject constructor(
val dateAdded = cursor.getLong(dateAddedCol)
val width = cursor.getInt(widthCol)
val height = cursor.getInt(heightCol)
val filePath = cursor.getString(dataCol)
val contentUri: Uri = ContentUris.withAppendedId(
MediaStore.Images.Media.EXTERNAL_CONTENT_URI, id
)
// Skip SHA256 computation for speed - use URI as unique identifier
val sha256 = computeSHA256Fast(contentUri) ?: contentUri.toString()
val sha256 = computeSHA256(contentUri)
if (sha256 == null) {
Log.w("ImageRepository", "Skipped image: $displayName (cannot read bytes)")
continue
}
val imageEntity = ImageEntity(
imageId = UUID.randomUUID().toString(),
@@ -148,73 +93,36 @@ class ImageRepositoryImpl @Inject constructor(
ingestedAt = System.currentTimeMillis(),
width = width,
height = height,
source = determineSource(filePath)
source = "CAMERA" // or SCREENSHOT / IMPORTED
)
batch.add(imageEntity)
processed++
// Insert batch and update progress
if (batch.size >= batchSize) {
imageDao.insertImages(batch)
batch.clear()
// Update progress on main thread
withContext(Dispatchers.Main) {
onProgress(processed, totalImages)
}
// Yield to prevent blocking
yield()
Log.d("ImageRepository", "Processed $processed/$totalImages images")
}
}
// Insert remaining batch
if (batch.isNotEmpty()) {
imageDao.insertImages(batch)
withContext(Dispatchers.Main) {
onProgress(processed, totalImages)
}
imageList += imageEntity
Log.i("ImageRepository", "Processing image: $displayName, SHA256: $sha256")
}
}
Log.i("ImageRepository", "Ingestion complete: $processed images from ALL folders")
if (imageList.isNotEmpty()) {
imageDao.insertImages(imageList)
Log.i("ImageRepository", "Ingested ${imageList.size} images")
} else {
Log.i("ImageRepository", "No images found on device")
}
} catch (e: Exception) {
Log.e("ImageRepository", "Error ingesting images", e)
throw e
}
}
/**
 * Classifies an image's origin from its MediaStore file path.
 *
 * Matching is case-insensitive and precedence matters: a path under DCIM is
 * always treated as CAMERA, even if it also contains "Screenshot" (e.g.
 * `DCIM/Screenshots`). Unknown or missing paths default to CAMERA.
 *
 * @param filePath absolute file path from MediaStore's DATA column, or null.
 * @return one of "CAMERA", "SCREENSHOT", or "IMPORTED".
 */
private fun determineSource(filePath: String?): String {
    // No path information — fall back to the default classification.
    val path = filePath ?: return "CAMERA"
    if (path.contains("DCIM", ignoreCase = true)) return "CAMERA"
    if (path.contains("Screenshot", ignoreCase = true)) return "SCREENSHOT"
    if (path.contains("Download", ignoreCase = true) ||
        path.contains("WhatsApp", ignoreCase = true)
    ) {
        return "IMPORTED"
    }
    return "CAMERA"
}
/**
* Fast SHA256 computation - only reads first 8KB for speed
* For 10,000+ images, this saves significant time
*/
private fun computeSHA256Fast(uri: Uri): String? {
private fun computeSHA256(uri: Uri): String? {
return try {
val digest = MessageDigest.getInstance("SHA-256")
context.contentResolver.openInputStream(uri)?.use { input ->
// Only read first 8KB for uniqueness check
val buffer = ByteArray(8192)
val read = input.read(buffer)
if (read > 0) {
var read: Int
while (input.read(buffer).also { read = it } > 0) {
digest.update(buffer, 0, read)
}
} ?: return null

View File

@@ -78,8 +78,8 @@ sealed class AppDestinations(
description = "Manage photo tags"
)
data object UTILITIES : AppDestinations(
route = AppRoutes.UTILITIES,
data object Upload : AppDestinations(
route = AppRoutes.UPLOAD,
icon = Icons.Default.UploadFile,
label = "Upload",
description = "Add new photos"
@@ -117,7 +117,7 @@ val faceRecognitionDestinations = listOf(
// Organization section
val organizationDestinations = listOf(
AppDestinations.Tags,
AppDestinations.UTILITIES
AppDestinations.Upload
)
// Settings (separate, pinned to bottom)
@@ -140,7 +140,7 @@ fun getDestinationByRoute(route: String?): AppDestinations? {
AppRoutes.TRAIN -> AppDestinations.Train
AppRoutes.MODELS -> AppDestinations.Models
AppRoutes.TAGS -> AppDestinations.Tags
AppRoutes.UTILITIES -> AppDestinations.UTILITIES
AppRoutes.UPLOAD -> AppDestinations.Upload
AppRoutes.SETTINGS -> AppDestinations.Settings
else -> null
}

View File

@@ -13,7 +13,6 @@ import androidx.navigation.compose.NavHost
import androidx.navigation.compose.composable
import androidx.navigation.navArgument
import com.placeholder.sherpai2.ui.devscreens.DummyScreen
import com.placeholder.sherpai2.ui.album.AlbumViewScreen
import com.placeholder.sherpai2.ui.explore.ExploreScreen
import com.placeholder.sherpai2.ui.imagedetail.ImageDetailScreen
import com.placeholder.sherpai2.ui.modelinventory.PersonInventoryScreen
@@ -25,7 +24,6 @@ import com.placeholder.sherpai2.ui.trainingprep.ScanResultsScreen
import com.placeholder.sherpai2.ui.trainingprep.ScanningState
import com.placeholder.sherpai2.ui.trainingprep.TrainViewModel
import com.placeholder.sherpai2.ui.trainingprep.TrainingScreen
import com.placeholder.sherpai2.ui.utilities.PhotoUtilitiesScreen
import java.net.URLDecoder
import java.net.URLEncoder
@@ -71,10 +69,6 @@ fun AppNavHost(
onImageClick = { imageUri ->
val encodedUri = URLEncoder.encode(imageUri, "UTF-8")
navController.navigate("${AppRoutes.IMAGE_DETAIL}/$encodedUri")
},
onAlbumClick = { tagValue ->
// Navigate to tag-based album
navController.navigate("album/tag/$tagValue")
}
)
}
@@ -86,7 +80,10 @@ fun AppNavHost(
composable(AppRoutes.EXPLORE) {
ExploreScreen(
onAlbumClick = { albumType, albumId ->
navController.navigate("album/$albumType/$albumId")
println("Album clicked: type=$albumType id=$albumId")
// Example future navigation
// navController.navigate("${AppRoutes.ALBUM}/$albumType/$albumId")
}
)
}
@@ -113,32 +110,6 @@ fun AppNavHost(
)
}
/**
* ALBUM VIEW SCREEN
* View photos in a specific album (tag, person, or time-based)
*/
composable(
route = "album/{albumType}/{albumId}",
arguments = listOf(
navArgument("albumType") {
type = NavType.StringType
},
navArgument("albumId") {
type = NavType.StringType
}
)
) {
AlbumViewScreen(
onBack = {
navController.popBackStack()
},
onImageClick = { imageUri ->
val encodedUri = URLEncoder.encode(imageUri, "UTF-8")
navController.navigate("${AppRoutes.IMAGE_DETAIL}/$encodedUri")
}
)
}
// ==========================================
// FACE RECOGNITION SYSTEM
// ==========================================
@@ -252,11 +223,14 @@ fun AppNavHost(
}
/**
* UTILITIES SCREEN
* Photo collection management tools
* UPLOAD SCREEN
* Import new photos (placeholder)
*/
composable(AppRoutes.UTILITIES) {
PhotoUtilitiesScreen()
composable(AppRoutes.UPLOAD) {
DummyScreen(
title = "Upload",
subtitle = "Add photos to your library"
)
}
// ==========================================

View File

@@ -13,7 +13,7 @@ package com.placeholder.sherpai2.ui.navigation
object AppRoutes {
// Photo browsing
const val SEARCH = "search"
const val EXPLORE = "explore"
const val EXPLORE = "explore" // UPDATED: Changed from TOUR
const val IMAGE_DETAIL = "IMAGE_DETAIL"
// Face recognition
@@ -23,7 +23,7 @@ object AppRoutes {
// Organization
const val TAGS = "tags"
const val UTILITIES = "utilities" // CHANGED from UPLOAD
const val UPLOAD = "upload"
// Settings
const val SETTINGS = "settings"
@@ -33,8 +33,4 @@ object AppRoutes {
const val CROP_SCREEN = "CROP_SCREEN"
const val TRAINING_SCREEN = "TRAINING_SCREEN"
const val ScanResultsScreen = "First Scan Results"
// Album view
const val ALBUM_VIEW = "album/{albumType}/{albumId}"
fun albumRoute(albumType: String, albumId: String) = "album/$albumType/$albumId"
}

View File

@@ -135,7 +135,7 @@ fun AppDrawerContent(
val orgItems = listOf(
DrawerItem(AppRoutes.TAGS, "Tags", Icons.AutoMirrored.Filled.Label, "Manage photo tags"),
DrawerItem(AppRoutes.UTILITIES, "Upload", Icons.Default.UploadFile, "Add new photos")
DrawerItem(AppRoutes.UPLOAD, "Upload", Icons.Default.UploadFile, "Add new photos")
)
orgItems.forEach { item ->

View File

@@ -150,7 +150,7 @@ fun MainScreen() {
Icon(Icons.Default.Add, "Add Tag")
}
}
AppRoutes.UTILITIES -> {
AppRoutes.UPLOAD -> {
ExtendedFloatingActionButton(
onClick = { /* TODO: Select photos */ },
icon = { Icon(Icons.Default.CloudUpload, "Upload") },
@@ -185,7 +185,7 @@ private fun getScreenTitle(route: String): String {
AppRoutes.TRAIN -> "Train New Person"
AppRoutes.MODELS -> "AI Models"
AppRoutes.TAGS -> "Tag Management"
AppRoutes.UTILITIES -> "Photo Util."
AppRoutes.UPLOAD -> "Upload Photos"
AppRoutes.SETTINGS -> "Settings"
else -> "SherpAI"
}
@@ -201,7 +201,7 @@ private fun getScreenSubtitle(route: String): String? {
AppRoutes.INVENTORY -> "Trained face models"
AppRoutes.TRAIN -> "Add a new person to recognize"
AppRoutes.TAGS -> "Organize your photo collection"
AppRoutes.UTILITIES -> "Tools for managing collection"
AppRoutes.UPLOAD -> "Add photos to your library"
else -> null
}
}
@@ -213,7 +213,7 @@ private fun shouldShowFab(route: String): Boolean {
return when (route) {
AppRoutes.SEARCH,
AppRoutes.TAGS,
AppRoutes.UTILITIES -> true
AppRoutes.UPLOAD -> true
else -> false
}
}

View File

@@ -303,10 +303,8 @@ private fun PhotoCard(
onClick = { onImageClick(imageWithFaceTags.image.imageUri) }
)
// Person tags (deduplicated)
val uniquePersons = imageWithFaceTags.persons.distinctBy { it.id }
if (uniquePersons.isNotEmpty()) {
// Person tags
if (imageWithFaceTags.persons.isNotEmpty()) {
when (displayMode) {
DisplayMode.SIMPLE -> {
// SIMPLE: Just names, no icons, no percentages
@@ -315,7 +313,7 @@ private fun PhotoCard(
modifier = Modifier.fillMaxWidth()
) {
Text(
text = uniquePersons
text = imageWithFaceTags.persons
.take(3)
.joinToString(", ") { it.name },
style = MaterialTheme.typography.bodySmall,
@@ -326,7 +324,7 @@ private fun PhotoCard(
}
}
DisplayMode.VERBOSE -> {
// VERBOSE: Person tags + System tags
// VERBOSE: Icons + names + confidence
Surface(
color = MaterialTheme.colorScheme.primaryContainer.copy(alpha = 0.5f),
modifier = Modifier.fillMaxWidth()
@@ -335,66 +333,44 @@ private fun PhotoCard(
modifier = Modifier.padding(8.dp),
verticalArrangement = Arrangement.spacedBy(4.dp)
) {
// Person tags with confidence
uniquePersons.take(3).forEachIndexed { index, person ->
Row(
horizontalArrangement = Arrangement.spacedBy(6.dp),
verticalAlignment = Alignment.CenterVertically
) {
Icon(
Icons.Default.Face,
contentDescription = null,
modifier = Modifier.size(14.dp),
tint = MaterialTheme.colorScheme.primary
)
Text(
text = person.name,
style = MaterialTheme.typography.bodySmall,
modifier = Modifier.weight(1f),
maxLines = 1,
overflow = TextOverflow.Ellipsis
)
// Find matching face tag for confidence
val matchingTag = imageWithFaceTags.faceTags
.find { tag ->
imageWithFaceTags.persons[imageWithFaceTags.faceTags.indexOf(tag)].id == person.id
}
if (matchingTag != null) {
val confidence = (matchingTag.confidence * 100).toInt()
Text(
text = "$confidence%",
style = MaterialTheme.typography.labelSmall,
color = MaterialTheme.colorScheme.primary
imageWithFaceTags.persons
.take(3)
.forEachIndexed { index, person ->
Row(
horizontalArrangement = Arrangement.spacedBy(6.dp),
verticalAlignment = Alignment.CenterVertically
) {
Icon(
Icons.Default.Face,
contentDescription = null,
modifier = Modifier.size(14.dp),
tint = MaterialTheme.colorScheme.primary
)
Text(
text = person.name,
style = MaterialTheme.typography.bodySmall,
modifier = Modifier.weight(1f),
maxLines = 1,
overflow = TextOverflow.Ellipsis
)
if (index < imageWithFaceTags.faceTags.size) {
val confidence = (imageWithFaceTags.faceTags[index].confidence * 100).toInt()
Text(
text = "$confidence%",
style = MaterialTheme.typography.labelSmall,
color = MaterialTheme.colorScheme.primary
)
}
}
}
}
if (uniquePersons.size > 3) {
if (imageWithFaceTags.persons.size > 3) {
Text(
text = "+${uniquePersons.size - 3} more",
text = "+${imageWithFaceTags.persons.size - 3} more",
style = MaterialTheme.typography.labelSmall,
color = MaterialTheme.colorScheme.primary
)
}
// System tags (verbose mode only)
// TODO: Get image tags from ImageWithEverything
// For now, show placeholder
HorizontalDivider(
modifier = Modifier.padding(vertical = 4.dp),
color = MaterialTheme.colorScheme.outline.copy(alpha = 0.3f)
)
Row(
horizontalArrangement = Arrangement.spacedBy(4.dp),
modifier = Modifier.fillMaxWidth()
) {
// Example system tags - replace with actual tags from image
SystemTagChip("indoor")
SystemTagChip("high_res")
SystemTagChip("morning")
}
}
}
}
@@ -404,20 +380,6 @@ private fun PhotoCard(
}
}
/**
 * Small pill-style chip for a system tag value.
 *
 * Underscores in [tagValue] are rendered as spaces (e.g. "high_res" →
 * "high res") on a semi-transparent secondary-container surface.
 */
@Composable
private fun SystemTagChip(tagValue: String) {
Surface(
shape = RoundedCornerShape(4.dp),
color = MaterialTheme.colorScheme.secondaryContainer.copy(alpha = 0.5f)
) {
Text(
text = tagValue.replace("_", " "),
style = MaterialTheme.typography.labelSmall,
modifier = Modifier.padding(horizontal = 4.dp, vertical = 2.dp)
)
}
}
@Composable
private fun EmptySearchState() {
Box(

View File

@@ -8,29 +8,19 @@ import android.net.Uri
import com.google.mlkit.vision.common.InputImage
import com.google.mlkit.vision.face.FaceDetection
import com.google.mlkit.vision.face.FaceDetectorOptions
import kotlinx.coroutines.Dispatchers
import kotlinx.coroutines.async
import kotlinx.coroutines.awaitAll
import kotlinx.coroutines.coroutineScope
import kotlinx.coroutines.tasks.await
import kotlinx.coroutines.withContext
import java.io.InputStream
/**
* FIXED FaceDetectionHelper with parallel processing
*
* FIXES:
* - Removed bitmap.recycle() that broke face cropping
* - Proper memory management with downsampling
* - Parallel processing for speed
* Helper class for detecting faces in images using ML Kit Face Detection
*/
class FaceDetectionHelper(private val context: Context) {
private val faceDetectorOptions = FaceDetectorOptions.Builder()
.setPerformanceMode(FaceDetectorOptions.PERFORMANCE_MODE_ACCURATE) // ACCURATE for quality
.setPerformanceMode(FaceDetectorOptions.PERFORMANCE_MODE_ACCURATE)
.setLandmarkMode(FaceDetectorOptions.LANDMARK_MODE_ALL)
.setClassificationMode(FaceDetectorOptions.CLASSIFICATION_MODE_ALL)
.setMinFaceSize(0.15f)
.setMinFaceSize(0.15f) // Detect faces that are at least 15% of image
.build()
private val detector = FaceDetection.getClient(faceDetectorOptions)
@@ -40,7 +30,7 @@ class FaceDetectionHelper(private val context: Context) {
val hasFace: Boolean,
val faceCount: Int,
val faceBounds: List<Rect> = emptyList(),
val croppedFaceBitmap: Bitmap? = null, // Only largest face
val croppedFaceBitmap: Bitmap? = null,
val errorMessage: String? = null
)
@@ -48,77 +38,48 @@ class FaceDetectionHelper(private val context: Context) {
* Detect faces in a single image
*/
suspend fun detectFacesInImage(uri: Uri): FaceDetectionResult {
return withContext(Dispatchers.IO) {
var bitmap: Bitmap? = null
try {
bitmap = loadBitmap(uri)
if (bitmap == null) {
return@withContext FaceDetectionResult(
uri = uri,
hasFace = false,
faceCount = 0,
errorMessage = "Failed to load image"
)
}
val inputImage = InputImage.fromBitmap(bitmap, 0)
val faces = detector.process(inputImage).await()
// Sort by face size (area) to get the largest face
val sortedFaces = faces.sortedByDescending { face ->
face.boundingBox.width() * face.boundingBox.height()
}
val croppedFace = if (sortedFaces.isNotEmpty()) {
// Crop the LARGEST detected face (most likely the subject)
cropFaceFromBitmap(bitmap, sortedFaces[0].boundingBox)
} else null
FaceDetectionResult(
uri = uri,
hasFace = faces.isNotEmpty(),
faceCount = faces.size,
faceBounds = faces.map { it.boundingBox },
croppedFaceBitmap = croppedFace
)
} catch (e: Exception) {
FaceDetectionResult(
return try {
val bitmap = loadBitmap(uri)
if (bitmap == null) {
return FaceDetectionResult(
uri = uri,
hasFace = false,
faceCount = 0,
errorMessage = e.message ?: "Unknown error"
errorMessage = "Failed to load image"
)
} finally {
// NOW we can recycle after we're completely done
bitmap?.recycle()
}
val inputImage = InputImage.fromBitmap(bitmap, 0)
val faces = detector.process(inputImage).await()
val croppedFace = if (faces.isNotEmpty()) {
// Crop the first detected face with some padding
cropFaceFromBitmap(bitmap, faces[0].boundingBox)
} else null
FaceDetectionResult(
uri = uri,
hasFace = faces.isNotEmpty(),
faceCount = faces.size,
faceBounds = faces.map { it.boundingBox },
croppedFaceBitmap = croppedFace
)
} catch (e: Exception) {
FaceDetectionResult(
uri = uri,
hasFace = false,
faceCount = 0,
errorMessage = e.message ?: "Unknown error"
)
}
}
/**
* PARALLEL face detection in multiple images - 10x FASTER!
*
* @param onProgress Callback with (current, total)
* Detect faces in multiple images
*/
suspend fun detectFacesInImages(
uris: List<Uri>,
onProgress: ((Int, Int) -> Unit)? = null
): List<FaceDetectionResult> = coroutineScope {
val total = uris.size
var completed = 0
// Process in parallel batches of 5 to avoid overwhelming the system
uris.chunked(5).flatMap { batch ->
batch.map { uri ->
async(Dispatchers.IO) {
val result = detectFacesInImage(uri)
synchronized(this@FaceDetectionHelper) {
completed++
onProgress?.invoke(completed, total)
}
result
}
}.awaitAll()
suspend fun detectFacesInImages(uris: List<Uri>): List<FaceDetectionResult> {
return uris.map { uri ->
detectFacesInImage(uri)
}
}
@@ -141,35 +102,13 @@ class FaceDetectionHelper(private val context: Context) {
}
/**
* Load bitmap from URI with downsampling for memory efficiency
* Load bitmap from URI
*/
private fun loadBitmap(uri: Uri): Bitmap? {
return try {
val inputStream: InputStream? = context.contentResolver.openInputStream(uri)
// First decode with inJustDecodeBounds to get dimensions
val options = BitmapFactory.Options().apply {
inJustDecodeBounds = true
}
BitmapFactory.decodeStream(inputStream, null, options)
inputStream?.close()
// Calculate sample size to limit max dimension to 1024px
val maxDimension = 1024
var sampleSize = 1
while (options.outWidth / sampleSize > maxDimension ||
options.outHeight / sampleSize > maxDimension) {
sampleSize *= 2
}
// Now decode with sample size
val inputStream2 = context.contentResolver.openInputStream(uri)
val finalOptions = BitmapFactory.Options().apply {
inSampleSize = sampleSize
}
BitmapFactory.decodeStream(inputStream2, null, finalOptions)?.also {
inputStream2?.close()
BitmapFactory.decodeStream(inputStream)?.also {
inputStream?.close()
}
} catch (e: Exception) {
null

View File

@@ -95,6 +95,7 @@ fun ScanResultsScreen(
ImprovedResultsView(
result = state.sanityCheckResult,
onContinue = {
// Show name input dialog instead of immediately finishing
showNameInputDialog = true
},
onRetry = onFinish,
@@ -103,8 +104,7 @@ fun ScanResultsScreen(
},
onSelectFaceFromMultiple = { result ->
showFacePickerDialog = result
},
trainViewModel = trainViewModel
}
)
}
@@ -357,8 +357,7 @@ private fun ImprovedResultsView(
onContinue: () -> Unit,
onRetry: () -> Unit,
onReplaceImage: (Uri, Uri) -> Unit,
onSelectFaceFromMultiple: (FaceDetectionHelper.FaceDetectionResult) -> Unit,
trainViewModel: TrainViewModel
onSelectFaceFromMultiple: (FaceDetectionHelper.FaceDetectionResult) -> Unit
) {
LazyColumn(
modifier = Modifier.fillMaxSize(),
@@ -420,9 +419,7 @@ private fun ImprovedResultsView(
},
onSelectFace = if (imageResult.faceCount > 1) {
{ onSelectFaceFromMultiple(imageResult) }
} else null,
trainViewModel = trainViewModel,
isExcluded = trainViewModel.isImageExcluded(imageResult.uri)
} else null
)
}
@@ -591,9 +588,7 @@ private fun ImageResultCard(
index: Int,
result: FaceDetectionHelper.FaceDetectionResult,
onReplace: (Uri) -> Unit,
onSelectFace: (() -> Unit)?,
trainViewModel: TrainViewModel,
isExcluded: Boolean
onSelectFace: (() -> Unit)?
) {
val photoPickerLauncher = rememberLauncherForActivityResult(
contract = ActivityResultContracts.PickVisualMedia()
@@ -602,7 +597,6 @@ private fun ImageResultCard(
}
val status = when {
isExcluded -> ImageStatus.EXCLUDED
result.errorMessage != null -> ImageStatus.ERROR
!result.hasFace -> ImageStatus.NO_FACE
result.faceCount > 1 -> ImageStatus.MULTIPLE_FACES
@@ -616,7 +610,6 @@ private fun ImageResultCard(
containerColor = when (status) {
ImageStatus.VALID -> MaterialTheme.colorScheme.primaryContainer.copy(alpha = 0.3f)
ImageStatus.MULTIPLE_FACES -> MaterialTheme.colorScheme.tertiaryContainer.copy(alpha = 0.4f)
ImageStatus.EXCLUDED -> MaterialTheme.colorScheme.surfaceVariant.copy(alpha = 0.5f)
else -> MaterialTheme.colorScheme.errorContainer.copy(alpha = 0.3f)
}
)
@@ -636,7 +629,6 @@ private fun ImageResultCard(
color = when (status) {
ImageStatus.VALID -> MaterialTheme.colorScheme.primary
ImageStatus.MULTIPLE_FACES -> MaterialTheme.colorScheme.tertiary
ImageStatus.EXCLUDED -> MaterialTheme.colorScheme.outline
else -> MaterialTheme.colorScheme.error
},
shape = CircleShape
@@ -665,7 +657,6 @@ private fun ImageResultCard(
when (status) {
ImageStatus.VALID -> MaterialTheme.colorScheme.primary
ImageStatus.MULTIPLE_FACES -> MaterialTheme.colorScheme.tertiary
ImageStatus.EXCLUDED -> MaterialTheme.colorScheme.outline
else -> MaterialTheme.colorScheme.error
}
),
@@ -693,14 +684,12 @@ private fun ImageResultCard(
imageVector = when (status) {
ImageStatus.VALID -> Icons.Default.CheckCircle
ImageStatus.MULTIPLE_FACES -> Icons.Default.Info
ImageStatus.EXCLUDED -> Icons.Default.RemoveCircle
else -> Icons.Default.Warning
},
contentDescription = null,
tint = when (status) {
ImageStatus.VALID -> MaterialTheme.colorScheme.primary
ImageStatus.MULTIPLE_FACES -> MaterialTheme.colorScheme.tertiary
ImageStatus.EXCLUDED -> MaterialTheme.colorScheme.outline
else -> MaterialTheme.colorScheme.error
},
modifier = Modifier.size(20.dp)
@@ -711,7 +700,6 @@ private fun ImageResultCard(
ImageStatus.VALID -> "Face Detected"
ImageStatus.MULTIPLE_FACES -> "Multiple Faces (${result.faceCount})"
ImageStatus.NO_FACE -> "No Face Detected"
ImageStatus.EXCLUDED -> "Excluded"
ImageStatus.ERROR -> "Error"
},
style = MaterialTheme.typography.bodyMedium,
@@ -732,8 +720,8 @@ private fun ImageResultCard(
horizontalAlignment = Alignment.End,
verticalArrangement = Arrangement.spacedBy(4.dp)
) {
// Select Face button (for multiple faces, not excluded)
if (onSelectFace != null && !isExcluded) {
// Select Face button (for multiple faces)
if (onSelectFace != null) {
OutlinedButton(
onClick = onSelectFace,
modifier = Modifier.height(32.dp),
@@ -753,62 +741,23 @@ private fun ImageResultCard(
}
}
// Replace button (not for excluded)
if (!isExcluded) {
OutlinedButton(
onClick = {
photoPickerLauncher.launch(
PickVisualMediaRequest(ActivityResultContracts.PickVisualMedia.ImageOnly)
)
},
modifier = Modifier.height(32.dp),
contentPadding = PaddingValues(horizontal = 12.dp, vertical = 0.dp)
) {
Icon(
Icons.Default.Refresh,
contentDescription = null,
modifier = Modifier.size(16.dp)
)
Spacer(modifier = Modifier.width(4.dp))
Text("Replace", style = MaterialTheme.typography.bodySmall)
}
}
// Exclude/Include button
// Replace button
OutlinedButton(
onClick = {
if (isExcluded) {
trainViewModel.includeImage(result.uri)
} else {
trainViewModel.excludeImage(result.uri)
}
photoPickerLauncher.launch(
PickVisualMediaRequest(ActivityResultContracts.PickVisualMedia.ImageOnly)
)
},
modifier = Modifier.height(32.dp),
contentPadding = PaddingValues(horizontal = 12.dp, vertical = 0.dp),
colors = ButtonDefaults.outlinedButtonColors(
contentColor = if (isExcluded)
MaterialTheme.colorScheme.primary
else
MaterialTheme.colorScheme.error
),
border = BorderStroke(
1.dp,
if (isExcluded)
MaterialTheme.colorScheme.primary
else
MaterialTheme.colorScheme.error
)
contentPadding = PaddingValues(horizontal = 12.dp, vertical = 0.dp)
) {
Icon(
if (isExcluded) Icons.Default.Add else Icons.Default.Close,
Icons.Default.Refresh,
contentDescription = null,
modifier = Modifier.size(16.dp)
)
Spacer(modifier = Modifier.width(4.dp))
Text(
if (isExcluded) "Include" else "Exclude",
style = MaterialTheme.typography.bodySmall
)
Text("Replace", style = MaterialTheme.typography.bodySmall)
}
}
}
@@ -926,6 +875,5 @@ private enum class ImageStatus {
VALID,
MULTIPLE_FACES,
NO_FACE,
ERROR,
EXCLUDED
ERROR
}

View File

@@ -44,9 +44,6 @@ data class PersonInfo(
val relationship: String
)
/**
* FIXED TrainViewModel with proper exclude functionality and efficient replace
*/
@HiltViewModel
class TrainViewModel @Inject constructor(
application: Application,
@@ -69,9 +66,6 @@ class TrainViewModel @Inject constructor(
private var currentImageUris: List<Uri> = emptyList()
private val manualFaceSelections = mutableMapOf<Uri, ManualFaceSelection>()
// Track excluded images
private val excludedImages = mutableSetOf<Uri>()
data class ManualFaceSelection(
val faceIndex: Int,
val croppedFaceBitmap: Bitmap
@@ -84,39 +78,6 @@ class TrainViewModel @Inject constructor(
personInfo = PersonInfo(name, dateOfBirth, relationship)
}
/**
* Exclude an image from training
*/
fun excludeImage(uri: Uri) {
excludedImages.add(uri)
val currentState = _uiState.value
if (currentState is ScanningState.Success) {
val updatedResult = applyManualSelections(currentState.sanityCheckResult)
_uiState.value = ScanningState.Success(updatedResult)
}
}
/**
* Include a previously excluded image
*/
fun includeImage(uri: Uri) {
excludedImages.remove(uri)
val currentState = _uiState.value
if (currentState is ScanningState.Success) {
val updatedResult = applyManualSelections(currentState.sanityCheckResult)
_uiState.value = ScanningState.Success(updatedResult)
}
}
/**
* Check if an image is excluded
*/
fun isImageExcluded(uri: Uri): Boolean {
return uri in excludedImages
}
/**
* Create face model with captured person info
*/
@@ -128,7 +89,7 @@ class TrainViewModel @Inject constructor(
}
val validImages = currentState.sanityCheckResult.validImagesWithFaces
if (validImages.size < 15) {
if (validImages.size < 15) { // Updated minimum
_trainingState.value = TrainingState.Error(
"Need at least 15 valid images, have ${validImages.size}"
)
@@ -143,14 +104,16 @@ class TrainViewModel @Inject constructor(
total = validImages.size
)
// Create person with captured info
val person = PersonEntity.create(
name = personName,
dateOfBirth = personInfo?.dateOfBirth,
relationship = personInfo?.relationship
)
// Create person with face model
val personId = faceRecognitionRepository.createPersonWithFaceModel(
person = person,
person = person, // Pass full PersonEntity now
validImages = validImages,
onProgress = { current, total ->
_trainingState.value = TrainingState.Processing(
@@ -182,61 +145,25 @@ class TrainViewModel @Inject constructor(
fun scanAndTagFaces(imageUris: List<Uri>) {
currentImageUris = imageUris
manualFaceSelections.clear()
excludedImages.clear()
performScan(imageUris)
}
/**
* FIXED: Replace image - only rescan the ONE new image, not all images!
*/
fun replaceImage(oldUri: Uri, newUri: Uri) {
viewModelScope.launch {
try {
val currentState = _uiState.value
if (currentState !is ScanningState.Success) return@launch
// Update the URI list
val updatedUris = currentImageUris.toMutableList()
val index = updatedUris.indexOf(oldUri)
if (index == -1) return@launch
val updatedUris = currentImageUris.toMutableList()
val index = updatedUris.indexOf(oldUri)
if (index != -1) {
updatedUris[index] = newUri
currentImageUris = updatedUris
// Clean up old selections/exclusions
manualFaceSelections.remove(oldUri)
excludedImages.remove(oldUri)
// Only scan the NEW image
val newResult = faceDetectionHelper.detectFacesInImage(newUri)
// Update the results list
val updatedFaceResults = currentState.sanityCheckResult.faceDetectionResults.toMutableList()
updatedFaceResults[index] = newResult
// Create updated SanityCheckResult
val updatedSanityResult = currentState.sanityCheckResult.copy(
faceDetectionResults = updatedFaceResults
)
// Apply manual selections and exclusions
val finalResult = applyManualSelections(updatedSanityResult)
_uiState.value = ScanningState.Success(finalResult)
} catch (e: Exception) {
_uiState.value = ScanningState.Error(
e.message ?: "Failed to replace image"
)
performScan(currentImageUris)
}
}
}
/**
* Select face and auto-include the image
*/
fun selectFaceFromImage(imageUri: Uri, faceIndex: Int, croppedFaceBitmap: Bitmap) {
manualFaceSelections[imageUri] = ManualFaceSelection(faceIndex, croppedFaceBitmap)
excludedImages.remove(imageUri) // Auto-include
val currentState = _uiState.value
if (currentState is ScanningState.Success) {
@@ -245,9 +172,6 @@ class TrainViewModel @Inject constructor(
}
}
/**
* Perform full scan with exclusions and progress tracking
*/
private fun performScan(imageUris: List<Uri>) {
viewModelScope.launch {
try {
@@ -255,13 +179,9 @@ class TrainViewModel @Inject constructor(
val result = sanityChecker.performSanityChecks(
imageUris = imageUris,
minImagesRequired = 15,
minImagesRequired = 15, // Updated minimum
allowMultipleFaces = true,
duplicateSimilarityThreshold = 0.95,
excludedImages = excludedImages,
onProgress = { stage, current, total ->
_uiState.value = ScanningState.Processing(current, total)
}
duplicateSimilarityThreshold = 0.95
)
val finalResult = applyManualSelections(result)
@@ -275,14 +195,11 @@ class TrainViewModel @Inject constructor(
}
}
/**
* Apply manual selections with exclusion filtering
*/
private fun applyManualSelections(
result: TrainingSanityChecker.SanityCheckResult
): TrainingSanityChecker.SanityCheckResult {
if (manualFaceSelections.isEmpty() && excludedImages.isEmpty()) {
if (manualFaceSelections.isEmpty()) {
return result
}
@@ -299,36 +216,26 @@ class TrainViewModel @Inject constructor(
}
val updatedValidImages = updatedFaceResults
.filter { it.uri !in excludedImages } // Filter excluded
.filter { it.hasFace }
.filter { it.croppedFaceBitmap != null }
.filter { it.errorMessage == null }
.filter { it.faceCount >= 1 }
.map { faceResult ->
.map { result ->
TrainingSanityChecker.ValidTrainingImage(
uri = faceResult.uri,
croppedFaceBitmap = faceResult.croppedFaceBitmap!!,
faceCount = faceResult.faceCount
uri = result.uri,
croppedFaceBitmap = result.croppedFaceBitmap!!,
faceCount = result.faceCount
)
}
val updatedErrors = result.validationErrors.toMutableList()
// Remove errors for manually selected faces or excluded images
updatedErrors.removeAll { error ->
when (error) {
is TrainingSanityChecker.ValidationError.MultipleFacesDetected ->
manualFaceSelections.containsKey(error.uri) || excludedImages.contains(error.uri)
is TrainingSanityChecker.ValidationError.NoFaceDetected ->
error.uris.any { excludedImages.contains(it) }
is TrainingSanityChecker.ValidationError.ImageLoadError ->
excludedImages.contains(error.uri)
else -> false
}
error is TrainingSanityChecker.ValidationError.MultipleFacesDetected &&
manualFaceSelections.containsKey(error.uri)
}
// Update insufficient images error
if (updatedValidImages.size < 15) {
if (updatedValidImages.size < 15) { // Updated minimum
if (updatedErrors.none { it is TrainingSanityChecker.ValidationError.InsufficientImages }) {
updatedErrors.add(
TrainingSanityChecker.ValidationError.InsufficientImages(
@@ -347,8 +254,7 @@ class TrainViewModel @Inject constructor(
isValid = isValid,
faceDetectionResults = updatedFaceResults,
validationErrors = updatedErrors,
validImagesWithFaces = updatedValidImages,
excludedImages = excludedImages
validImagesWithFaces = updatedValidImages
)
}
@@ -361,7 +267,6 @@ class TrainViewModel @Inject constructor(
_trainingState.value = TrainingState.Idle
currentImageUris = emptyList()
manualFaceSelections.clear()
excludedImages.clear()
personInfo = null
}
@@ -398,8 +303,7 @@ private fun TrainingSanityChecker.SanityCheckResult.copy(
duplicateCheckResult: DuplicateImageDetector.DuplicateCheckResult = this.duplicateCheckResult,
validationErrors: List<TrainingSanityChecker.ValidationError> = this.validationErrors,
warnings: List<String> = this.warnings,
validImagesWithFaces: List<TrainingSanityChecker.ValidTrainingImage> = this.validImagesWithFaces,
excludedImages: Set<Uri> = this.excludedImages
validImagesWithFaces: List<TrainingSanityChecker.ValidTrainingImage> = this.validImagesWithFaces
): TrainingSanityChecker.SanityCheckResult {
return TrainingSanityChecker.SanityCheckResult(
isValid = isValid,
@@ -407,7 +311,6 @@ private fun TrainingSanityChecker.SanityCheckResult.copy(
duplicateCheckResult = duplicateCheckResult,
validationErrors = validationErrors,
warnings = warnings,
validImagesWithFaces = validImagesWithFaces,
excludedImages = excludedImages
validImagesWithFaces = validImagesWithFaces
)
}

View File

@@ -5,12 +5,7 @@ import android.graphics.Bitmap
import android.net.Uri
/**
* ENHANCED TrainingSanityChecker
*
* New features:
* - Progress callbacks
* - Exclude functionality
* - Faster processing
* Coordinates sanity checks for training images
*/
class TrainingSanityChecker(private val context: Context) {
@@ -23,8 +18,7 @@ class TrainingSanityChecker(private val context: Context) {
val duplicateCheckResult: DuplicateImageDetector.DuplicateCheckResult,
val validationErrors: List<ValidationError>,
val warnings: List<String>,
val validImagesWithFaces: List<ValidTrainingImage>,
val excludedImages: Set<Uri> = emptySet() // NEW: Track excluded images
val validImagesWithFaces: List<ValidTrainingImage>
)
data class ValidTrainingImage(
@@ -42,42 +36,30 @@ class TrainingSanityChecker(private val context: Context) {
}
/**
* Perform comprehensive sanity checks with PROGRESS tracking
* Perform comprehensive sanity checks on training images
*/
suspend fun performSanityChecks(
imageUris: List<Uri>,
minImagesRequired: Int = 15,
minImagesRequired: Int = 10,
allowMultipleFaces: Boolean = false,
duplicateSimilarityThreshold: Double = 0.95,
excludedImages: Set<Uri> = emptySet(), // NEW: Allow excluding images
onProgress: ((String, Int, Int) -> Unit)? = null // NEW: Progress callback
duplicateSimilarityThreshold: Double = 0.95
): SanityCheckResult {
val validationErrors = mutableListOf<ValidationError>()
val warnings = mutableListOf<String>()
// Filter out excluded images
val activeImages = imageUris.filter { it !in excludedImages }
// Check minimum image count (AFTER exclusions)
if (activeImages.size < minImagesRequired) {
// Check minimum image count
if (imageUris.size < minImagesRequired) {
validationErrors.add(
ValidationError.InsufficientImages(
required = minImagesRequired,
available = activeImages.size
available = imageUris.size
)
)
}
// Step 1: Detect faces in all images (WITH PROGRESS)
onProgress?.invoke("Detecting faces...", 0, activeImages.size)
val faceDetectionResults = faceDetectionHelper.detectFacesInImages(
uris = activeImages,
onProgress = { current, total ->
onProgress?.invoke("Detecting faces...", current, total)
}
)
// Step 1: Detect faces in all images
val faceDetectionResults = faceDetectionHelper.detectFacesInImages(imageUris)
// Check for images without faces
val imagesWithoutFaces = faceDetectionResults.filter { !it.hasFace }
@@ -116,10 +98,8 @@ class TrainingSanityChecker(private val context: Context) {
}
// Step 2: Check for duplicate images
onProgress?.invoke("Checking for duplicates...", activeImages.size, activeImages.size)
val duplicateCheckResult = duplicateDetector.checkForDuplicates(
uris = activeImages,
uris = imageUris,
similarityThreshold = duplicateSimilarityThreshold
)
@@ -158,16 +138,13 @@ class TrainingSanityChecker(private val context: Context) {
val isValid = validationErrors.isEmpty() && validImagesWithFaces.size >= minImagesRequired
onProgress?.invoke("Analysis complete", activeImages.size, activeImages.size)
return SanityCheckResult(
isValid = isValid,
faceDetectionResults = faceDetectionResults,
duplicateCheckResult = duplicateCheckResult,
validationErrors = validationErrors,
warnings = warnings,
validImagesWithFaces = validImagesWithFaces,
excludedImages = excludedImages
validImagesWithFaces = validImagesWithFaces
)
}
@@ -179,20 +156,24 @@ class TrainingSanityChecker(private val context: Context) {
when (error) {
is ValidationError.NoFaceDetected -> {
val count = error.uris.size
"No face detected in $count image(s)"
val images = error.uris.joinToString(", ") { it.lastPathSegment ?: "Unknown" }
"No face detected in $count image(s): $images"
}
is ValidationError.MultipleFacesDetected -> {
"Multiple faces (${error.faceCount}) detected in: ${error.uri.lastPathSegment}"
}
is ValidationError.DuplicateImages -> {
val count = error.groups.size
"Found $count duplicate group(s)"
val details = error.groups.joinToString("\n") { group ->
" - ${group.images.size} duplicates: ${group.images.joinToString(", ") { it.lastPathSegment ?: "Unknown" }}"
}
"Found $count duplicate group(s):\n$details"
}
is ValidationError.InsufficientImages -> {
"Need ${error.required} images, have ${error.available}"
"Insufficient images: need ${error.required}, but only ${error.available} valid images available"
}
is ValidationError.ImageLoadError -> {
"Failed to load image: ${error.uri.lastPathSegment}"
"Failed to load image ${error.uri.lastPathSegment}: ${error.error}"
}
}
}

View File

@@ -1,447 +0,0 @@
package com.placeholder.sherpai2.ui.utilities
import androidx.compose.foundation.layout.*
import androidx.compose.foundation.lazy.LazyColumn
import androidx.compose.foundation.shape.RoundedCornerShape
import androidx.compose.material.icons.Icons
import androidx.compose.material.icons.filled.*
import androidx.compose.material3.*
import androidx.compose.runtime.*
import androidx.compose.ui.Alignment
import androidx.compose.ui.Modifier
import androidx.compose.ui.graphics.vector.ImageVector
import androidx.compose.ui.text.font.FontWeight
import androidx.compose.ui.unit.dp
import androidx.hilt.navigation.compose.hiltViewModel
import androidx.lifecycle.compose.collectAsStateWithLifecycle
/**
* PhotoUtilitiesScreen - Manage photo collection
*
* Features:
* - Manual photo scan
* - Duplicate detection
* - Burst detection
* - Quality analysis
*/
@OptIn(ExperimentalMaterial3Api::class)
@Composable
fun PhotoUtilitiesScreen(
viewModel: PhotoUtilitiesViewModel = hiltViewModel()
) {
val uiState by viewModel.uiState.collectAsStateWithLifecycle()
val scanProgress by viewModel.scanProgress.collectAsStateWithLifecycle()
Scaffold(
topBar = {
TopAppBar(
title = {
Column {
Text(
"Photo Utilities",
style = MaterialTheme.typography.titleLarge,
fontWeight = FontWeight.Bold
)
Text(
"Manage your photo collection",
style = MaterialTheme.typography.bodySmall,
color = MaterialTheme.colorScheme.onSurfaceVariant
)
}
},
colors = TopAppBarDefaults.topAppBarColors(
containerColor = MaterialTheme.colorScheme.primaryContainer.copy(alpha = 0.5f)
)
)
}
) { paddingValues ->
LazyColumn(
modifier = Modifier
.fillMaxSize()
.padding(paddingValues),
contentPadding = PaddingValues(16.dp),
verticalArrangement = Arrangement.spacedBy(16.dp)
) {
// Section: Scan & Import
item {
SectionHeader(
title = "Scan & Import",
icon = Icons.Default.Scanner
)
}
item {
UtilityCard(
title = "Scan for Photos",
description = "Search your device for new photos",
icon = Icons.Default.PhotoLibrary,
buttonText = "Scan Now",
enabled = uiState !is UtilitiesUiState.Scanning,
onClick = { viewModel.scanForPhotos() }
)
}
// Section: Organization
item {
Spacer(Modifier.height(8.dp))
SectionHeader(
title = "Organization",
icon = Icons.Default.Folder
)
}
item {
UtilityCard(
title = "Detect Duplicates",
description = "Find and tag duplicate photos",
icon = Icons.Default.FileCopy,
buttonText = "Find Duplicates",
enabled = uiState !is UtilitiesUiState.Scanning,
onClick = { viewModel.detectDuplicates() }
)
}
item {
UtilityCard(
title = "Detect Bursts",
description = "Group photos taken in rapid succession (3+ in 2 seconds)",
icon = Icons.Default.BurstMode,
buttonText = "Find Bursts",
enabled = uiState !is UtilitiesUiState.Scanning,
onClick = { viewModel.detectBursts() }
)
}
// Section: Quality
item {
Spacer(Modifier.height(8.dp))
SectionHeader(
title = "Quality Analysis",
icon = Icons.Default.HighQuality
)
}
item {
UtilityCard(
title = "Find Screenshots & Blurry",
description = "Identify screenshots and low-quality photos",
icon = Icons.Default.PhoneAndroid,
buttonText = "Analyze",
enabled = uiState !is UtilitiesUiState.Scanning,
onClick = { viewModel.analyzeQuality() }
)
}
// Progress indicator
if (scanProgress != null) {
item {
ProgressCard(scanProgress!!)
}
}
// Results
when (val state = uiState) {
is UtilitiesUiState.ScanComplete -> {
item {
ResultCard(
title = "Scan Complete",
message = state.message,
icon = Icons.Default.CheckCircle,
iconTint = MaterialTheme.colorScheme.primary
)
}
}
is UtilitiesUiState.DuplicatesFound -> {
item {
ResultCard(
title = "Duplicates Found",
message = "Found ${state.groups.size} groups of duplicates (${state.groups.sumOf { it.images.size - 1 }} duplicate photos)",
icon = Icons.Default.Info,
iconTint = MaterialTheme.colorScheme.tertiary
)
}
}
is UtilitiesUiState.BurstsFound -> {
item {
ResultCard(
title = "Bursts Found",
message = "Found ${state.groups.size} burst sequences (${state.groups.sumOf { it.images.size }} photos total)",
icon = Icons.Default.Info,
iconTint = MaterialTheme.colorScheme.tertiary
)
}
}
is UtilitiesUiState.QualityAnalysisComplete -> {
item {
ResultCard(
title = "Analysis Complete",
message = "Screenshots: ${state.screenshots}\nBlurry: ${state.blurry}",
icon = Icons.Default.CheckCircle,
iconTint = MaterialTheme.colorScheme.primary
)
}
}
is UtilitiesUiState.Error -> {
item {
ResultCard(
title = "Error",
message = state.message,
icon = Icons.Default.Error,
iconTint = MaterialTheme.colorScheme.error
)
}
}
else -> {}
}
// Info card
item {
Spacer(Modifier.height(8.dp))
InfoCard()
}
}
}
}
@Composable
private fun SectionHeader(
title: String,
icon: ImageVector
) {
Row(
verticalAlignment = Alignment.CenterVertically,
horizontalArrangement = Arrangement.spacedBy(8.dp),
modifier = Modifier.padding(vertical = 8.dp)
) {
Icon(
icon,
contentDescription = null,
tint = MaterialTheme.colorScheme.primary,
modifier = Modifier.size(24.dp)
)
Text(
text = title,
style = MaterialTheme.typography.titleMedium,
fontWeight = FontWeight.Bold,
color = MaterialTheme.colorScheme.primary
)
}
}
@Composable
private fun UtilityCard(
title: String,
description: String,
icon: ImageVector,
buttonText: String,
enabled: Boolean,
onClick: () -> Unit
) {
Card(
modifier = Modifier.fillMaxWidth(),
elevation = CardDefaults.cardElevation(defaultElevation = 2.dp)
) {
Row(
modifier = Modifier
.fillMaxWidth()
.padding(16.dp),
horizontalArrangement = Arrangement.spacedBy(16.dp),
verticalAlignment = Alignment.CenterVertically
) {
// Icon
Surface(
shape = RoundedCornerShape(12.dp),
color = MaterialTheme.colorScheme.primaryContainer,
modifier = Modifier.size(56.dp)
) {
Box(contentAlignment = Alignment.Center) {
Icon(
icon,
contentDescription = null,
modifier = Modifier.size(32.dp),
tint = MaterialTheme.colorScheme.primary
)
}
}
// Text
Column(
modifier = Modifier.weight(1f),
verticalArrangement = Arrangement.spacedBy(4.dp)
) {
Text(
text = title,
style = MaterialTheme.typography.titleMedium,
fontWeight = FontWeight.SemiBold
)
Text(
text = description,
style = MaterialTheme.typography.bodySmall,
color = MaterialTheme.colorScheme.onSurfaceVariant
)
}
// Button
Button(
onClick = onClick,
enabled = enabled
) {
Text(buttonText)
}
}
}
}
@Composable
private fun ProgressCard(progress: ScanProgress) {
Card(
modifier = Modifier.fillMaxWidth(),
colors = CardDefaults.cardColors(
containerColor = MaterialTheme.colorScheme.secondaryContainer
)
) {
Column(
modifier = Modifier
.fillMaxWidth()
.padding(16.dp),
verticalArrangement = Arrangement.spacedBy(12.dp)
) {
Row(
modifier = Modifier.fillMaxWidth(),
horizontalArrangement = Arrangement.SpaceBetween
) {
Text(
text = progress.message,
style = MaterialTheme.typography.bodyMedium,
fontWeight = FontWeight.Medium
)
if (progress.total > 0) {
Text(
text = "${progress.current} / ${progress.total}",
style = MaterialTheme.typography.bodySmall,
color = MaterialTheme.colorScheme.primary
)
}
}
if (progress.total > 0) {
LinearProgressIndicator(
progress = { progress.current.toFloat() / progress.total.toFloat() },
modifier = Modifier.fillMaxWidth()
)
} else {
LinearProgressIndicator(
modifier = Modifier.fillMaxWidth()
)
}
}
}
}
@Composable
private fun ResultCard(
title: String,
message: String,
icon: ImageVector,
iconTint: androidx.compose.ui.graphics.Color
) {
Card(
modifier = Modifier.fillMaxWidth(),
colors = CardDefaults.cardColors(
containerColor = iconTint.copy(alpha = 0.1f)
)
) {
Row(
modifier = Modifier
.fillMaxWidth()
.padding(16.dp),
horizontalArrangement = Arrangement.spacedBy(16.dp),
verticalAlignment = Alignment.CenterVertically
) {
Icon(
icon,
contentDescription = null,
tint = iconTint,
modifier = Modifier.size(32.dp)
)
Column(
verticalArrangement = Arrangement.spacedBy(4.dp)
) {
Text(
text = title,
style = MaterialTheme.typography.titleMedium,
fontWeight = FontWeight.Bold
)
Text(
text = message,
style = MaterialTheme.typography.bodyMedium
)
}
}
}
}
@Composable
private fun InfoCard() {
Card(
modifier = Modifier.fillMaxWidth(),
colors = CardDefaults.cardColors(
containerColor = MaterialTheme.colorScheme.surfaceVariant
)
) {
Column(
modifier = Modifier
.fillMaxWidth()
.padding(16.dp),
verticalArrangement = Arrangement.spacedBy(12.dp)
) {
Row(
horizontalArrangement = Arrangement.spacedBy(8.dp),
verticalAlignment = Alignment.CenterVertically
) {
Icon(
Icons.Default.Info,
contentDescription = null,
tint = MaterialTheme.colorScheme.primary
)
Text(
text = "How It Works",
style = MaterialTheme.typography.titleSmall,
fontWeight = FontWeight.Bold
)
}
InfoItem(
"Duplicates",
"Finds exact duplicates by comparing file content"
)
InfoItem(
"Bursts",
"Groups 3+ photos taken within 2 seconds. Tags one as 'representative' for albums"
)
InfoItem(
"Quality",
"Detects screenshots by screen dimensions. Blurry detection coming soon"
)
}
}
}
@Composable
private fun InfoItem(title: String, description: String) {
Column(
verticalArrangement = Arrangement.spacedBy(2.dp)
) {
Text(
text = "$title",
style = MaterialTheme.typography.bodyMedium,
fontWeight = FontWeight.Medium
)
Text(
text = description,
style = MaterialTheme.typography.bodySmall,
color = MaterialTheme.colorScheme.onSurfaceVariant,
modifier = Modifier.padding(start = 12.dp)
)
}
}

View File

@@ -1,384 +0,0 @@
package com.placeholder.sherpai2.ui.utilities
import android.graphics.Bitmap
import androidx.lifecycle.ViewModel
import androidx.lifecycle.viewModelScope
import com.placeholder.sherpai2.data.local.dao.ImageDao
import com.placeholder.sherpai2.data.local.dao.ImageTagDao
import com.placeholder.sherpai2.data.local.dao.TagDao
import com.placeholder.sherpai2.data.local.entity.ImageEntity
import com.placeholder.sherpai2.data.local.entity.ImageTagEntity
import com.placeholder.sherpai2.data.local.entity.TagEntity
import com.placeholder.sherpai2.domain.repository.ImageRepository
import dagger.hilt.android.lifecycle.HiltViewModel
import kotlinx.coroutines.Dispatchers
import kotlinx.coroutines.flow.MutableStateFlow
import kotlinx.coroutines.flow.StateFlow
import kotlinx.coroutines.flow.asStateFlow
import kotlinx.coroutines.launch
import kotlinx.coroutines.withContext
import java.util.UUID
import javax.inject.Inject
import kotlin.math.abs
/**
* PhotoUtilitiesViewModel - Photo collection management
*
* Features:
* 1. Manual photo scan/rescan
* 2. Duplicate detection (SHA256 + perceptual hash)
* 3. Burst detection (photos within 2 seconds)
* 4. Quality analysis (blurry, screenshots)
*/
@HiltViewModel
class PhotoUtilitiesViewModel @Inject constructor(
private val imageRepository: ImageRepository,
private val imageDao: ImageDao,
private val tagDao: TagDao,
private val imageTagDao: ImageTagDao
) : ViewModel() {
private val _uiState = MutableStateFlow<UtilitiesUiState>(UtilitiesUiState.Idle)
val uiState: StateFlow<UtilitiesUiState> = _uiState.asStateFlow()
private val _scanProgress = MutableStateFlow<ScanProgress?>(null)
val scanProgress: StateFlow<ScanProgress?> = _scanProgress.asStateFlow()
/**
* Manual scan for new photos
*/
fun scanForPhotos() {
viewModelScope.launch(Dispatchers.IO) {
try {
_uiState.value = UtilitiesUiState.Scanning("photos")
_scanProgress.value = ScanProgress("Scanning device...", 0, 0)
val beforeCount = imageDao.getImageCount()
imageRepository.ingestImagesWithProgress { current, total ->
_scanProgress.value = ScanProgress(
"Found $current photos...",
current,
total
)
}
val afterCount = imageDao.getImageCount()
val newPhotos = afterCount - beforeCount
withContext(Dispatchers.Main) {
_uiState.value = UtilitiesUiState.ScanComplete(
"Found $newPhotos new photos",
newPhotos
)
_scanProgress.value = null
}
} catch (e: Exception) {
withContext(Dispatchers.Main) {
_uiState.value = UtilitiesUiState.Error(
e.message ?: "Failed to scan photos"
)
_scanProgress.value = null
}
}
}
}
/**
* Detect duplicate photos
*/
fun detectDuplicates() {
viewModelScope.launch(Dispatchers.IO) {
try {
_uiState.value = UtilitiesUiState.Scanning("duplicates")
_scanProgress.value = ScanProgress("Analyzing photos...", 0, 0)
val allImages = imageDao.getAllImages()
val duplicateGroups = mutableListOf<DuplicateGroup>()
// Group by SHA256
val sha256Groups = allImages.groupBy { it.sha256 }
var processed = 0
sha256Groups.forEach { (sha256, images) ->
if (images.size > 1) {
// Found duplicates!
duplicateGroups.add(
DuplicateGroup(
images = images,
reason = "Exact duplicate (same file content)",
confidence = 1.0f
)
)
}
processed++
if (processed % 100 == 0) {
_scanProgress.value = ScanProgress(
"Checked $processed photos...",
processed,
sha256Groups.size
)
}
}
// Tag duplicates
val duplicateTag = getOrCreateTag("duplicate", "SYSTEM")
duplicateGroups.forEach { group ->
// Tag all but the first image (keep one, mark rest as dupes)
group.images.drop(1).forEach { image ->
tagImage(image.imageId, duplicateTag.tagId)
}
}
withContext(Dispatchers.Main) {
_uiState.value = UtilitiesUiState.DuplicatesFound(duplicateGroups)
_scanProgress.value = null
}
} catch (e: Exception) {
withContext(Dispatchers.Main) {
_uiState.value = UtilitiesUiState.Error(
e.message ?: "Failed to detect duplicates"
)
_scanProgress.value = null
}
}
}
}
/**
* Detect burst photos (rapid succession)
*/
fun detectBursts() {
viewModelScope.launch(Dispatchers.IO) {
try {
_uiState.value = UtilitiesUiState.Scanning("bursts")
_scanProgress.value = ScanProgress("Analyzing timestamps...", 0, 0)
val allImages = imageDao.getAllImagesSortedByTime()
val burstGroups = mutableListOf<BurstGroup>()
// Group photos taken within 2 seconds of each other
val burstThresholdMs = 2000L
var currentBurst = mutableListOf<ImageEntity>()
allImages.forEachIndexed { index, image ->
if (currentBurst.isEmpty()) {
currentBurst.add(image)
} else {
val lastImage = currentBurst.last()
val timeDiff = abs(image.capturedAt - lastImage.capturedAt)
if (timeDiff <= burstThresholdMs) {
// Part of current burst
currentBurst.add(image)
} else {
// End of burst
if (currentBurst.size >= 3) {
// Only consider bursts with 3+ photos
burstGroups.add(
BurstGroup(
images = currentBurst.toList(),
burstId = UUID.randomUUID().toString(),
representativeIndex = currentBurst.size / 2 // Middle photo
)
)
}
currentBurst = mutableListOf(image)
}
}
if (index % 100 == 0) {
_scanProgress.value = ScanProgress(
"Checked $index photos...",
index,
allImages.size
)
}
}
// Check last burst
if (currentBurst.size >= 3) {
burstGroups.add(
BurstGroup(
images = currentBurst,
burstId = UUID.randomUUID().toString(),
representativeIndex = currentBurst.size / 2
)
)
}
// Tag bursts
val burstTag = getOrCreateTag("burst", "SYSTEM")
burstGroups.forEach { group ->
group.images.forEach { image ->
tagImage(image.imageId, burstTag.tagId)
// Tag the representative photo specially
if (image == group.images[group.representativeIndex]) {
val burstRepTag = getOrCreateTag("burst_representative", "SYSTEM")
tagImage(image.imageId, burstRepTag.tagId)
}
}
}
withContext(Dispatchers.Main) {
_uiState.value = UtilitiesUiState.BurstsFound(burstGroups)
_scanProgress.value = null
}
} catch (e: Exception) {
withContext(Dispatchers.Main) {
_uiState.value = UtilitiesUiState.Error(
e.message ?: "Failed to detect bursts"
)
_scanProgress.value = null
}
}
}
}
/**
* Detect screenshots and low quality photos
*/
fun analyzeQuality() {
viewModelScope.launch(Dispatchers.IO) {
try {
_uiState.value = UtilitiesUiState.Scanning("quality")
_scanProgress.value = ScanProgress("Analyzing quality...", 0, 0)
val allImages = imageDao.getAllImages()
val screenshotTag = getOrCreateTag("screenshot", "SYSTEM")
val blurryTag = getOrCreateTag("blurry", "SYSTEM")
var screenshotCount = 0
var blurryCount = 0
allImages.forEachIndexed { index, image ->
// Detect screenshots by dimensions (screen-sized)
val isScreenshot = isLikelyScreenshot(image.width, image.height)
if (isScreenshot) {
tagImage(image.imageId, screenshotTag.tagId)
screenshotCount++
}
// TODO: Detect blurry photos (requires bitmap analysis)
// For now, skip blur detection
if (index % 50 == 0) {
_scanProgress.value = ScanProgress(
"Analyzed $index photos...",
index,
allImages.size
)
}
}
withContext(Dispatchers.Main) {
_uiState.value = UtilitiesUiState.QualityAnalysisComplete(
screenshots = screenshotCount,
blurry = blurryCount
)
_scanProgress.value = null
}
} catch (e: Exception) {
withContext(Dispatchers.Main) {
_uiState.value = UtilitiesUiState.Error(
e.message ?: "Failed to analyze quality"
)
_scanProgress.value = null
}
}
}
}
/**
* Detect screenshots by common screen dimensions
*/
private fun isLikelyScreenshot(width: Int, height: Int): Boolean {
val commonScreenRatios = listOf(
16.0 / 9.0, // 1080x1920, 1440x2560
19.5 / 9.0, // 1080x2340 (iPhone X)
20.0 / 9.0, // 1080x2400
18.5 / 9.0, // 1080x2220
19.0 / 9.0 // 1080x2280
)
val imageRatio = if (width > height) {
width.toDouble() / height.toDouble()
} else {
height.toDouble() / width.toDouble()
}
return commonScreenRatios.any { screenRatio ->
abs(imageRatio - screenRatio) < 0.1
}
}
private suspend fun getOrCreateTag(value: String, type: String): TagEntity {
return tagDao.getByValue(value) ?: run {
val tag = TagEntity(
tagId = UUID.randomUUID().toString(),
type = type,
value = value,
createdAt = System.currentTimeMillis()
)
tagDao.insert(tag)
tag
}
}
private suspend fun tagImage(imageId: String, tagId: String) {
val imageTag = ImageTagEntity(
imageId = imageId,
tagId = tagId,
source = "AUTO",
confidence = 1.0f,
visibility = "PUBLIC",
createdAt = System.currentTimeMillis()
)
imageTagDao.insert(imageTag)
}
fun resetState() {
_uiState.value = UtilitiesUiState.Idle
_scanProgress.value = null
}
}
/**
* UI State
*/
sealed class UtilitiesUiState {
object Idle : UtilitiesUiState()
data class Scanning(val type: String) : UtilitiesUiState()
data class ScanComplete(val message: String, val count: Int) : UtilitiesUiState()
data class DuplicatesFound(val groups: List<DuplicateGroup>) : UtilitiesUiState()
data class BurstsFound(val groups: List<BurstGroup>) : UtilitiesUiState()
data class QualityAnalysisComplete(
val screenshots: Int,
val blurry: Int
) : UtilitiesUiState()
data class Error(val message: String) : UtilitiesUiState()
}
data class ScanProgress(
val message: String,
val current: Int,
val total: Int
)
data class DuplicateGroup(
val images: List<ImageEntity>,
val reason: String,
val confidence: Float
)
data class BurstGroup(
val images: List<ImageEntity>,
val burstId: String,
val representativeIndex: Int // Which photo to show in albums
)